/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_atomic.h>
#include <rte_malloc.h>

#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"
/*
 * Default values for port configuration
 */
#define IGB_DEFAULT_RX_FREE_THRESH 32
#define IGB_DEFAULT_RX_PTHRESH     8
#define IGB_DEFAULT_RX_HTHRESH     8
#define IGB_DEFAULT_RX_WTHRESH     0

#define IGB_DEFAULT_TX_PTHRESH     32
#define IGB_DEFAULT_TX_HTHRESH     0
#define IGB_DEFAULT_TX_WTHRESH     0

#define IGB_HKEY_MAX_INDEX 10
/* Bit shift and mask */
#define IGB_4_BIT_WIDTH (CHAR_BIT / 2)
#define IGB_4_BIT_MASK  RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
#define IGB_8_BIT_WIDTH CHAR_BIT
#define IGB_8_BIT_MASK  UINT8_MAX
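
/*
 * For reference: RTE_LEN2MASK(ln, tp) from rte_common.h builds an ln-bit
 * all-ones mask of type tp, so IGB_4_BIT_MASK evaluates to 0x0F and
 * IGB_8_BIT_MASK to 0xFF. The 4-bit pair is used by the RSS
 * redirection-table code, which walks RETA entries in groups of four
 * (one 32-bit register) at a time.
 */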
/* Additional timesync values. */
#define E1000_ETQF_FILTER_1588 3
#define E1000_TIMINCA_INCVALUE 16000000
#define E1000_TIMINCA_INIT     ((0x02 << E1000_TIMINCA_16NS_SHIFT) \
                                | E1000_TIMINCA_INCVALUE)
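
/*
 * For reference (field layout per the 82580/i350 datasheets, stated here as
 * an assumption rather than taken from this file): E1000_TIMINCA packs an
 * increment-period selector in its upper bits and the increment value in
 * its lower bits, so the init value above requests a 2 x 16 ns period with
 * a 16000000 increment for the IEEE 1588 system clock.
 */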
static int eth_igb_configure(struct rte_eth_dev *dev);
static int eth_igb_start(struct rte_eth_dev *dev);
static void eth_igb_stop(struct rte_eth_dev *dev);
static void eth_igb_close(struct rte_eth_dev *dev);
static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
static void eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_igb_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);
static void eth_igb_stats_get(struct rte_eth_dev *dev,
                                struct rte_eth_stats *rte_stats);
static void eth_igb_stats_reset(struct rte_eth_dev *dev);
static void eth_igb_infos_get(struct rte_eth_dev *dev,
                                struct rte_eth_dev_info *dev_info);
static void eth_igbvf_infos_get(struct rte_eth_dev *dev,
                                struct rte_eth_dev_info *dev_info);
static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
                                struct rte_eth_fc_conf *fc_conf);
static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
                                struct rte_eth_fc_conf *fc_conf);
static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
static int eth_igb_interrupt_action(struct rte_eth_dev *dev);
static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
                                void *param);
static int igb_hardware_init(struct e1000_hw *hw);
static void igb_hw_control_acquire(struct e1000_hw *hw);
static void igb_hw_control_release(struct e1000_hw *hw);
static void igb_init_manageability(struct e1000_hw *hw);
static void igb_release_manageability(struct e1000_hw *hw);

static int eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
                                uint16_t vlan_id, int on);
static void eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
static void eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int eth_igb_led_on(struct rte_eth_dev *dev);
static int eth_igb_led_off(struct rte_eth_dev *dev);

static void igb_intr_disable(struct e1000_hw *hw);
static int igb_get_rx_buffer_size(struct e1000_hw *hw);
static void eth_igb_rar_set(struct rte_eth_dev *dev,
                                struct ether_addr *mac_addr,
                                uint32_t index, uint32_t pool);
static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
static void eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
                                struct ether_addr *addr);

static void igbvf_intr_disable(struct e1000_hw *hw);
static int igbvf_dev_configure(struct rte_eth_dev *dev);
static int igbvf_dev_start(struct rte_eth_dev *dev);
static void igbvf_dev_stop(struct rte_eth_dev *dev);
static void igbvf_dev_close(struct rte_eth_dev *dev);
static int eth_igbvf_link_update(struct e1000_hw *hw);
static void eth_igbvf_stats_get(struct rte_eth_dev *dev,
                                struct rte_eth_stats *rte_stats);
static void eth_igbvf_stats_reset(struct rte_eth_dev *dev);
static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
                                uint16_t vlan_id, int on);
static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static void igbvf_default_mac_addr_set(struct rte_eth_dev *dev,
                                struct ether_addr *addr);
static int igbvf_get_reg_length(struct rte_eth_dev *dev);
static int igbvf_get_regs(struct rte_eth_dev *dev,
                                struct rte_dev_reg_info *regs);

static int eth_igb_rss_reta_update(struct rte_eth_dev *dev,
                                struct rte_eth_rss_reta_entry64 *reta_conf,
                                uint16_t reta_size);
static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
                                struct rte_eth_rss_reta_entry64 *reta_conf,
                                uint16_t reta_size);

static int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
                                struct rte_eth_syn_filter *filter,
                                bool add);
static int eth_igb_syn_filter_get(struct rte_eth_dev *dev,
                                struct rte_eth_syn_filter *filter);
static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg);
static int igb_add_2tuple_filter(struct rte_eth_dev *dev,
                                struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_2tuple_filter(struct rte_eth_dev *dev,
                                struct rte_eth_ntuple_filter *ntuple_filter);
static int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
                                struct rte_eth_flex_filter *filter,
                                bool add);
static int eth_igb_get_flex_filter(struct rte_eth_dev *dev,
                                struct rte_eth_flex_filter *filter);
static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg);
static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
                                struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
                                struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
                                struct rte_eth_ntuple_filter *filter,
                                bool add);
static int igb_get_ntuple_filter(struct rte_eth_dev *dev,
                                struct rte_eth_ntuple_filter *filter);
static int igb_ntuple_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg);
static int igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
                                struct rte_eth_ethertype_filter *filter,
                                bool add);
static int igb_ethertype_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg);
static int igb_get_ethertype_filter(struct rte_eth_dev *dev,
                                struct rte_eth_ethertype_filter *filter);
static int eth_igb_filter_ctrl(struct rte_eth_dev *dev,
                                enum rte_filter_type filter_type,
                                enum rte_filter_op filter_op,
                                void *arg);
static int eth_igb_get_reg_length(struct rte_eth_dev *dev);
static int eth_igb_get_regs(struct rte_eth_dev *dev,
                                struct rte_dev_reg_info *regs);
static int eth_igb_get_eeprom_length(struct rte_eth_dev *dev);
static int eth_igb_get_eeprom(struct rte_eth_dev *dev,
                                struct rte_dev_eeprom_info *eeprom);
static int eth_igb_set_eeprom(struct rte_eth_dev *dev,
                                struct rte_dev_eeprom_info *eeprom);
static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
                                struct ether_addr *mc_addr_set,
                                uint32_t nb_mc_addr);
static int igb_timesync_enable(struct rte_eth_dev *dev);
static int igb_timesync_disable(struct rte_eth_dev *dev);
static int igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
                                struct timespec *timestamp,
                                uint32_t flags);
static int igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
                                struct timespec *timestamp);
static int eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                uint16_t queue_id);
static int eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                uint16_t queue_id);
static void eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
                                uint8_t queue, uint8_t msix_vector);
static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
                                uint8_t index, uint8_t offset);
static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);
/*
 * Macro to update VF statistics from registers that are not
 * "cleared on read".
 */
#define UPDATE_VF_STAT(reg, last, cur)            \
{                                                 \
        u32 latest = E1000_READ_REG(hw, reg);     \
        cur += latest - last;                     \
        last = latest;                            \
}
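
/*
 * Note: the VF statistics registers are free-running 32-bit counters that
 * are not cleared on read. Because both "latest" and "last" are u32, the
 * unsigned subtraction (latest - last) still yields the right delta when
 * the hardware counter wraps past 2^32 between two polls.
 */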
#define IGB_FC_PAUSE_TIME 0x0680
#define IGB_LINK_UPDATE_CHECK_TIMEOUT  90  /* 9s */
#define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */

#define IGBVF_PMD_NAME "rte_igbvf_pmd" /* PMD name */

static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;
/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_igb_map[] = {

#define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

{0},
};

/*
 * The set of PCI devices this driver supports (for 82576&I350 VF)
 */
static const struct rte_pci_id pci_id_igbvf_map[] = {

#define RTE_PCI_DEV_ID_DECL_IGBVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

{0},
};
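
/*
 * Both ID tables above use an X-macro pattern: rte_pci_dev_ids.h lists the
 * supported vendor/device pairs and expands each one through whichever
 * RTE_PCI_DEV_ID_DECL_* macro is defined before the #include, yielding one
 * rte_pci_id initializer per supported device.
 */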
static const struct eth_dev_ops eth_igb_ops = {
        .dev_configure = eth_igb_configure,
        .dev_start = eth_igb_start,
        .dev_stop = eth_igb_stop,
        .dev_close = eth_igb_close,
        .promiscuous_enable = eth_igb_promiscuous_enable,
        .promiscuous_disable = eth_igb_promiscuous_disable,
        .allmulticast_enable = eth_igb_allmulticast_enable,
        .allmulticast_disable = eth_igb_allmulticast_disable,
        .link_update = eth_igb_link_update,
        .stats_get = eth_igb_stats_get,
        .stats_reset = eth_igb_stats_reset,
        .dev_infos_get = eth_igb_infos_get,
        .mtu_set = eth_igb_mtu_set,
        .vlan_filter_set = eth_igb_vlan_filter_set,
        .vlan_tpid_set = eth_igb_vlan_tpid_set,
        .vlan_offload_set = eth_igb_vlan_offload_set,
        .rx_queue_setup = eth_igb_rx_queue_setup,
        .rx_queue_intr_enable = eth_igb_rx_queue_intr_enable,
        .rx_queue_intr_disable = eth_igb_rx_queue_intr_disable,
        .rx_queue_release = eth_igb_rx_queue_release,
        .rx_queue_count = eth_igb_rx_queue_count,
        .rx_descriptor_done = eth_igb_rx_descriptor_done,
        .tx_queue_setup = eth_igb_tx_queue_setup,
        .tx_queue_release = eth_igb_tx_queue_release,
        .dev_led_on = eth_igb_led_on,
        .dev_led_off = eth_igb_led_off,
        .flow_ctrl_get = eth_igb_flow_ctrl_get,
        .flow_ctrl_set = eth_igb_flow_ctrl_set,
        .mac_addr_add = eth_igb_rar_set,
        .mac_addr_remove = eth_igb_rar_clear,
        .mac_addr_set = eth_igb_default_mac_addr_set,
        .reta_update = eth_igb_rss_reta_update,
        .reta_query = eth_igb_rss_reta_query,
        .rss_hash_update = eth_igb_rss_hash_update,
        .rss_hash_conf_get = eth_igb_rss_hash_conf_get,
        .filter_ctrl = eth_igb_filter_ctrl,
        .set_mc_addr_list = eth_igb_set_mc_addr_list,
        .timesync_enable = igb_timesync_enable,
        .timesync_disable = igb_timesync_disable,
        .timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
        .timesync_read_tx_timestamp = igb_timesync_read_tx_timestamp,
        .get_reg_length = eth_igb_get_reg_length,
        .get_reg = eth_igb_get_regs,
        .get_eeprom_length = eth_igb_get_eeprom_length,
        .get_eeprom = eth_igb_get_eeprom,
        .set_eeprom = eth_igb_set_eeprom,
};
/*
 * dev_ops for virtual function, bare necessities for basic vf
 * operation have been implemented
 */
static const struct eth_dev_ops igbvf_eth_dev_ops = {
        .dev_configure = igbvf_dev_configure,
        .dev_start = igbvf_dev_start,
        .dev_stop = igbvf_dev_stop,
        .dev_close = igbvf_dev_close,
        .link_update = eth_igb_link_update,
        .stats_get = eth_igbvf_stats_get,
        .stats_reset = eth_igbvf_stats_reset,
        .vlan_filter_set = igbvf_vlan_filter_set,
        .dev_infos_get = eth_igbvf_infos_get,
        .rx_queue_setup = eth_igb_rx_queue_setup,
        .rx_queue_release = eth_igb_rx_queue_release,
        .tx_queue_setup = eth_igb_tx_queue_setup,
        .tx_queue_release = eth_igb_tx_queue_release,
        .set_mc_addr_list = eth_igb_set_mc_addr_list,
        .mac_addr_set = igbvf_default_mac_addr_set,
        .get_reg_length = igbvf_get_reg_length,
        .get_reg = igbvf_get_regs,
};
/**
 * Atomically reads the link status information from global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to read from.
 *   - Pointer to the buffer to be saved with the link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_igb_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
{
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &(dev->data->dev_link);

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                *(uint64_t *)src) == 0)
                return -1;

        return 0;
}
/**
 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to write to.
 *   - Pointer to the buffer holding the link status to be written.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
{
        struct rte_eth_link *dst = &(dev->data->dev_link);
        struct rte_eth_link *src = link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                *(uint64_t *)src) == 0)
                return -1;

        return 0;
}
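
/*
 * Both helpers above rely on struct rte_eth_link fitting into 64 bits so
 * that one rte_atomic64_cmpset() can copy the whole link record atomically;
 * a zero return from cmpset means another thread raced in between the read
 * of *dst and the update, and the helper reports failure to its caller.
 */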
static void
igb_intr_enable(struct rte_eth_dev *dev)
{
        struct e1000_interrupt *intr =
                E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
        E1000_WRITE_FLUSH(hw);
}

static void
igb_intr_disable(struct e1000_hw *hw)
{
        E1000_WRITE_REG(hw, E1000_IMC, ~0);
        E1000_WRITE_FLUSH(hw);
}
static inline int32_t
igb_pf_reset_hw(struct e1000_hw *hw)
{
        uint32_t ctrl_ext;
        int32_t status;

        status = e1000_reset_hw(hw);

        ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
        E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
        E1000_WRITE_FLUSH(hw);

        return status;
}
static void
igb_identify_hardware(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        hw->vendor_id = dev->pci_dev->id.vendor_id;
        hw->device_id = dev->pci_dev->id.device_id;
        hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
        hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;

        e1000_set_mac_type(hw);

        /* need to check if it is a vf device below */
}
static int
igb_reset_swfw_lock(struct e1000_hw *hw)
{
        int ret_val;

        /*
         * Do mac ops initialization manually here, since we will need
         * some function pointers set by this call.
         */
        ret_val = e1000_init_mac_params(hw);
        if (ret_val)
                return ret_val;

        /*
         * SMBI lock should not fail in this early stage. If this is the case,
         * it is due to an improper exit of the application.
         * So force the release of the faulty lock.
         */
        if (e1000_get_hw_semaphore_generic(hw) < 0) {
                PMD_DRV_LOG(DEBUG, "SMBI lock released");
        }
        e1000_put_hw_semaphore_generic(hw);

        if (hw->mac.ops.acquire_swfw_sync != NULL) {
                uint16_t mask;

                /*
                 * Phy lock should not fail in this early stage. If this is
                 * the case, it is due to an improper exit of the application.
                 * So force the release of the faulty lock.
                 */
                mask = E1000_SWFW_PHY0_SM << hw->bus.func;
                if (hw->bus.func > E1000_FUNC_1)
                        mask <<= 2;
                if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
                        PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
                                    hw->bus.func);
                }
                hw->mac.ops.release_swfw_sync(hw, mask);

                /*
                 * This one is more tricky since it is common to all ports; but
                 * swfw_sync retries last long enough (1s) to be almost sure
                 * that if lock can not be taken it is due to an improper lock
                 * of the semaphore.
                 */
                mask = E1000_SWFW_EEP_SM;
                if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
                        PMD_DRV_LOG(DEBUG, "SWFW common locks released");
                }
                hw->mac.ops.release_swfw_sync(hw, mask);
        }

        return E1000_SUCCESS;
}
static int
eth_igb_dev_init(struct rte_eth_dev *eth_dev)
{
        int error = 0;
        struct rte_pci_device *pci_dev;
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct e1000_vfta *shadow_vfta =
                E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
        struct e1000_filter_info *filter_info =
                E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
        struct e1000_adapter *adapter =
                E1000_DEV_PRIVATE(eth_dev->data->dev_private);
        uint32_t ctrl_ext;

        pci_dev = eth_dev->pci_dev;
        eth_dev->dev_ops = &eth_igb_ops;
        eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
        eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;

        /* for secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
         * RX function */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                if (eth_dev->data->scattered_rx)
                        eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
                return 0;
        }

        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

        igb_identify_hardware(eth_dev);
        if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
                error = -EIO;
                goto err_late;
        }

        e1000_get_bus_info(hw);

        /* Reset any pending lock */
        if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) {
                error = -EIO;
                goto err_late;
        }

        /* Finish initialization */
        if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
                error = -EIO;
                goto err_late;
        }

        hw->mac.autoneg = 1;
        hw->phy.autoneg_wait_to_complete = 0;
        hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;

        /* Copper options */
        if (hw->phy.media_type == e1000_media_type_copper) {
                hw->phy.mdix = 0; /* AUTO_ALL_MODES */
                hw->phy.disable_polarity_correction = 0;
                hw->phy.ms_type = e1000_ms_hw_default;
        }

        /*
         * Start from a known state, this is important in reading the nvm
         * and mac from that.
         */
        igb_pf_reset_hw(hw);

        /* Make sure we have a good EEPROM before we read from it */
        if (e1000_validate_nvm_checksum(hw) < 0) {
                /*
                 * Some PCI-E parts fail the first check due to
                 * the link being in sleep state, call it again,
                 * if it fails a second time its a real issue.
                 */
                if (e1000_validate_nvm_checksum(hw) < 0) {
                        PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
                        error = -EIO;
                        goto err_late;
                }
        }

        /* Read the permanent MAC address out of the EEPROM */
        if (e1000_read_mac_addr(hw) != 0) {
                PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
                error = -EIO;
                goto err_late;
        }

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("e1000",
                ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
                        "store MAC addresses",
                        ETHER_ADDR_LEN * hw->mac.rar_entry_count);
                error = -ENOMEM;
                goto err_late;
        }

        /* Copy the permanent MAC address */
        ether_addr_copy((struct ether_addr *)hw->mac.addr,
                        &eth_dev->data->mac_addrs[0]);

        /* initialize the vfta */
        memset(shadow_vfta, 0, sizeof(*shadow_vfta));

        /* Now initialize the hardware */
        if (igb_hardware_init(hw) != 0) {
                PMD_INIT_LOG(ERR, "Hardware initialization failed");
                rte_free(eth_dev->data->mac_addrs);
                eth_dev->data->mac_addrs = NULL;
                error = -ENODEV;
                goto err_late;
        }
        hw->mac.get_link_status = 1;
        adapter->stopped = 0;

        /* Indicate SOL/IDER usage */
        if (e1000_check_reset_block(hw) < 0) {
                PMD_INIT_LOG(ERR, "PHY reset is blocked due to"
                        " SOL/IDER session");
        }

        /* initialize PF if max_vfs not zero */
        igb_pf_host_init(eth_dev);

        ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
        E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
        E1000_WRITE_FLUSH(hw);

        PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id);

        /* enable support intr */
        igb_intr_enable(eth_dev);

        TAILQ_INIT(&filter_info->flex_list);
        filter_info->flex_mask = 0;
        TAILQ_INIT(&filter_info->twotuple_list);
        filter_info->twotuple_mask = 0;
        TAILQ_INIT(&filter_info->fivetuple_list);
        filter_info->fivetuple_mask = 0;

        return 0;

err_late:
        igb_hw_control_release(hw);

        return error;
}
static int
eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev;
        struct e1000_hw *hw;
        struct e1000_adapter *adapter =
                E1000_DEV_PRIVATE(eth_dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -EPERM;

        hw = E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        pci_dev = eth_dev->pci_dev;

        if (adapter->stopped == 0)
                eth_igb_close(eth_dev);

        eth_dev->dev_ops = NULL;
        eth_dev->rx_pkt_burst = NULL;
        eth_dev->tx_pkt_burst = NULL;

        /* Reset any pending lock */
        igb_reset_swfw_lock(hw);

        rte_free(eth_dev->data->mac_addrs);
        eth_dev->data->mac_addrs = NULL;

        /* uninitialize PF if max_vfs not zero */
        igb_pf_host_uninit(eth_dev);

        /* disable uio intr before callback unregister */
        rte_intr_disable(&(pci_dev->intr_handle));
        rte_intr_callback_unregister(&(pci_dev->intr_handle),
                eth_igb_interrupt_handler, (void *)eth_dev);

        return 0;
}
/*
 * Virtual Function device init
 */
static int
eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev;
        struct e1000_adapter *adapter =
                E1000_DEV_PRIVATE(eth_dev->data->dev_private);
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        int diag;

        PMD_INIT_FUNC_TRACE();

        eth_dev->dev_ops = &igbvf_eth_dev_ops;
        eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
        eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;

        /* for secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
         * RX function */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                if (eth_dev->data->scattered_rx)
                        eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
                return 0;
        }

        pci_dev = eth_dev->pci_dev;

        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
        adapter->stopped = 0;

        /* Initialize the shared code (base driver) */
        diag = e1000_setup_init_funcs(hw, TRUE);
        if (diag != 0) {
                PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d",
                        diag);
                return -EIO;
        }

        /* init_mailbox_params */
        hw->mbx.ops.init_params(hw);

        /* Disable the interrupts for VF */
        igbvf_intr_disable(hw);

        diag = hw->mac.ops.reset_hw(hw);

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN *
                hw->mac.rar_entry_count, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                        "Failed to allocate %d bytes needed to store MAC "
                        "addresses",
                        ETHER_ADDR_LEN * hw->mac.rar_entry_count);
                return -ENOMEM;
        }

        /* Copy the permanent MAC address */
        ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
                        &eth_dev->data->mac_addrs[0]);

        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x "
                     "mac.type=%s",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id, "igb_mac_82576_vf");

        return 0;
}
static int
eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
{
        struct e1000_adapter *adapter =
                E1000_DEV_PRIVATE(eth_dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -EPERM;

        if (adapter->stopped == 0)
                igbvf_dev_close(eth_dev);

        eth_dev->dev_ops = NULL;
        eth_dev->rx_pkt_burst = NULL;
        eth_dev->tx_pkt_burst = NULL;

        rte_free(eth_dev->data->mac_addrs);
        eth_dev->data->mac_addrs = NULL;

        return 0;
}
static struct eth_driver rte_igb_pmd = {
        .pci_drv = {
                .name = "rte_igb_pmd",
                .id_table = pci_id_igb_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
                        RTE_PCI_DRV_DETACHABLE,
        },
        .eth_dev_init = eth_igb_dev_init,
        .eth_dev_uninit = eth_igb_dev_uninit,
        .dev_private_size = sizeof(struct e1000_adapter),
};

/*
 * virtual function driver struct
 */
static struct eth_driver rte_igbvf_pmd = {
        .pci_drv = {
                .name = "rte_igbvf_pmd",
                .id_table = pci_id_igbvf_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
        },
        .eth_dev_init = eth_igbvf_dev_init,
        .eth_dev_uninit = eth_igbvf_dev_uninit,
        .dev_private_size = sizeof(struct e1000_adapter),
};
static int
rte_igb_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
{
        rte_eth_driver_register(&rte_igb_pmd);
        return 0;
}
static void
igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        /* RCTL: enable VLAN filter since VMDq always use VLAN filter */
        uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);

        rctl |= E1000_RCTL_VFE;
        E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}
/*
 * VF Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Virtual Poll Mode] Driver of PCI IGB devices.
 */
static int
rte_igbvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
{
        PMD_INIT_FUNC_TRACE();

        rte_eth_driver_register(&rte_igbvf_pmd);
        return 0;
}
static int
eth_igb_configure(struct rte_eth_dev *dev)
{
        struct e1000_interrupt *intr =
                E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();
        intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
        PMD_INIT_FUNC_TRACE();

        return 0;
}
static int
eth_igb_start(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct e1000_adapter *adapter =
                E1000_DEV_PRIVATE(dev->data->dev_private);
        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
        int ret, mask;
        uint32_t intr_vector = 0;
        uint32_t ctrl_ext;

        PMD_INIT_FUNC_TRACE();

        /* Power up the phy. Needed to make the link go Up */
        e1000_power_up_phy(hw);

        /*
         * Packet Buffer Allocation (PBA)
         * Writing PBA sets the receive portion of the buffer
         * the remainder is used for the transmit buffer.
         */
        if (hw->mac.type == e1000_82575) {
                uint32_t pba;

                pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
                E1000_WRITE_REG(hw, E1000_PBA, pba);
        }

        /* Put the address into the Receive Address Array */
        e1000_rar_set(hw, hw->mac.addr, 0);

        /* Initialize the hardware */
        if (igb_hardware_init(hw)) {
                PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
                return -EIO;
        }
        adapter->stopped = 0;

        E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);

        ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
        E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
        E1000_WRITE_FLUSH(hw);

        /* configure PF module if SRIOV enabled */
        igb_pf_host_configure(dev);

        /* check and configure queue intr-vector mapping */
        if (dev->data->dev_conf.intr_conf.rxq != 0)
                intr_vector = dev->data->nb_rx_queues;

        if (rte_intr_efd_enable(intr_handle, intr_vector))
                return -1;

        if (rte_intr_dp_is_en(intr_handle)) {
                intr_handle->intr_vec =
                        rte_zmalloc("intr_vec",
                                    dev->data->nb_rx_queues * sizeof(int), 0);
                if (intr_handle->intr_vec == NULL) {
                        PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
                                     " intr_vec\n", dev->data->nb_rx_queues);
                        return -ENOMEM;
                }
        }

        /* configure msix for rx interrupt */
        eth_igb_configure_msix_intr(dev);

        /* Configure for OS presence */
        igb_init_manageability(hw);

        eth_igb_tx_init(dev);

        /* This can fail when allocating mbufs for descriptor rings */
        ret = eth_igb_rx_init(dev);
        if (ret) {
                PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
                igb_dev_clear_queues(dev);
                return ret;
        }

        e1000_clear_hw_cntrs_base_generic(hw);

        /*
         * VLAN Offload Settings
         */
        mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
                ETH_VLAN_EXTEND_MASK;
        eth_igb_vlan_offload_set(dev, mask);

        if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
                /* Enable VLAN filter since VMDq always use VLAN filter */
                igb_vmdq_vlan_hw_filter_enable(dev);
        }

        if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
            (hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210) ||
            (hw->mac.type == e1000_i211)) {
                /* Configure EITR with the maximum possible value (0xFFFF) */
                E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
        }

        /* Setup link speed and duplex */
        switch (dev->data->dev_conf.link_speed) {
        case ETH_LINK_SPEED_AUTONEG:
                if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
                        hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
                else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
                        hw->phy.autoneg_advertised = E1000_ALL_HALF_DUPLEX;
                else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
                        hw->phy.autoneg_advertised = E1000_ALL_FULL_DUPLEX;
                else
                        goto error_invalid_config;
                break;
        case ETH_LINK_SPEED_10:
                if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
                        hw->phy.autoneg_advertised = E1000_ALL_10_SPEED;
                else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
                        hw->phy.autoneg_advertised = ADVERTISE_10_HALF;
                else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
                        hw->phy.autoneg_advertised = ADVERTISE_10_FULL;
                else
                        goto error_invalid_config;
                break;
        case ETH_LINK_SPEED_100:
                if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
                        hw->phy.autoneg_advertised = E1000_ALL_100_SPEED;
                else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
                        hw->phy.autoneg_advertised = ADVERTISE_100_HALF;
                else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
                        hw->phy.autoneg_advertised = ADVERTISE_100_FULL;
                else
                        goto error_invalid_config;
                break;
        case ETH_LINK_SPEED_1000:
                if ((dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX) ||
                    (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX))
                        hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
                else
                        goto error_invalid_config;
                break;
        case ETH_LINK_SPEED_10000:
        default:
                goto error_invalid_config;
        }
        e1000_setup_link(hw);

        /* check if lsc interrupt feature is enabled */
        if (dev->data->dev_conf.intr_conf.lsc != 0) {
                if (rte_intr_allow_others(intr_handle)) {
                        rte_intr_callback_register(intr_handle,
                                                   eth_igb_interrupt_handler,
                                                   (void *)dev);
                        eth_igb_lsc_interrupt_setup(dev);
                } else
                        PMD_INIT_LOG(INFO, "lsc won't enable because of"
                                     " no intr multiplex\n");
        }

        /* check if rxq interrupt is enabled */
        if (dev->data->dev_conf.intr_conf.rxq != 0)
                eth_igb_rxq_interrupt_setup(dev);

        /* enable uio/vfio intr/eventfd mapping */
        rte_intr_enable(intr_handle);

        /* resume enabled intr since hw reset */
        igb_intr_enable(dev);

        PMD_INIT_LOG(DEBUG, "<<");

        return 0;

error_invalid_config:
        PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u",
                     dev->data->dev_conf.link_speed,
                     dev->data->dev_conf.link_duplex, dev->data->port_id);
        igb_dev_clear_queues(dev);
        return -EINVAL;
}
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 *
 **********************************************************************/
static void
eth_igb_stop(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct e1000_filter_info *filter_info =
                E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
        struct rte_eth_link link;
        struct e1000_flex_filter *p_flex;
        struct e1000_5tuple_filter *p_5tuple, *p_5tuple_next;
        struct e1000_2tuple_filter *p_2tuple, *p_2tuple_next;
        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;

        igb_intr_disable(hw);

        /* disable intr eventfd mapping */
        rte_intr_disable(intr_handle);

        igb_pf_reset_hw(hw);
        E1000_WRITE_REG(hw, E1000_WUC, 0);

        /* Set bit for Go Link disconnect */
        if (hw->mac.type >= e1000_82580) {
                uint32_t phpm_reg;

                phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
                phpm_reg |= E1000_82580_PM_GO_LINKD;
                E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
        }

        /* Power down the phy. Needed to make the link go Down */
        if (hw->phy.media_type == e1000_media_type_copper)
                e1000_power_down_phy(hw);
        else
                e1000_shutdown_fiber_serdes_link(hw);

        igb_dev_clear_queues(dev);

        /* clear the recorded link status */
        memset(&link, 0, sizeof(link));
        rte_igb_dev_atomic_write_link_status(dev, &link);

        /* Remove all flex filters of the device */
        while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) {
                TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries);
                rte_free(p_flex);
        }
        filter_info->flex_mask = 0;

        /* Remove all ntuple filters of the device */
        for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
             p_5tuple != NULL; p_5tuple = p_5tuple_next) {
                p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
                TAILQ_REMOVE(&filter_info->fivetuple_list,
                             p_5tuple, entries);
                rte_free(p_5tuple);
        }
        filter_info->fivetuple_mask = 0;
        for (p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list);
             p_2tuple != NULL; p_2tuple = p_2tuple_next) {
                p_2tuple_next = TAILQ_NEXT(p_2tuple, entries);
                TAILQ_REMOVE(&filter_info->twotuple_list,
                             p_2tuple, entries);
                rte_free(p_2tuple);
        }
        filter_info->twotuple_mask = 0;

        /* Clean datapath event and queue/vec mapping */
        rte_intr_efd_disable(intr_handle);
        if (intr_handle->intr_vec != NULL) {
                rte_free(intr_handle->intr_vec);
                intr_handle->intr_vec = NULL;
        }
}
static void
eth_igb_close(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct e1000_adapter *adapter =
                E1000_DEV_PRIVATE(dev->data->dev_private);
        struct rte_eth_link link;
        struct rte_pci_device *pci_dev;

        eth_igb_stop(dev);
        adapter->stopped = 1;

        e1000_phy_hw_reset(hw);
        igb_release_manageability(hw);
        igb_hw_control_release(hw);

        /* Clear bit for Go Link disconnect */
        if (hw->mac.type >= e1000_82580) {
                uint32_t phpm_reg;

                phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
                phpm_reg &= ~E1000_82580_PM_GO_LINKD;
                E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
        }

        igb_dev_free_queues(dev);

        pci_dev = dev->pci_dev;
        if (pci_dev->intr_handle.intr_vec) {
                rte_free(pci_dev->intr_handle.intr_vec);
                pci_dev->intr_handle.intr_vec = NULL;
        }

        memset(&link, 0, sizeof(link));
        rte_igb_dev_atomic_write_link_status(dev, &link);
}
static int
igb_get_rx_buffer_size(struct e1000_hw *hw)
{
        uint32_t rx_buf_size;

        if (hw->mac.type == e1000_82576) {
                rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10;
        } else if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i350) {
                /* PBS needs to be translated according to a lookup table */
                rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf);
                rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size);
                rx_buf_size = (rx_buf_size << 10);
        } else if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
                rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10;
        } else {
                rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10;
        }

        return rx_buf_size;
}
/*********************************************************************
 *
 *  Initialize the hardware
 *
 **********************************************************************/
static int
igb_hardware_init(struct e1000_hw *hw)
{
        uint32_t rx_buf_size;
        int diag;

        /* Let the firmware know the OS is in control */
        igb_hw_control_acquire(hw);

        /*
         * These parameters control the automatic generation (Tx) and
         * response (Rx) to Ethernet PAUSE frames.
         * - High water mark should allow for at least two standard size (1518)
         *   frames to be received after sending an XOFF.
         * - Low water mark works best when it is very near the high water mark.
         *   This allows the receiver to restart by sending XON when it has
         *   drained a bit. Here we use an arbitrary value of 1500 which will
         *   restart after one full frame is pulled from the buffer. There
         *   could be several smaller frames in the buffer and if so they will
         *   not trigger the XON until their total number reduces the buffer
         *   by 1500.
         * - The pause time is fairly large at 1000 x 512ns = 512 usec.
         */
        rx_buf_size = igb_get_rx_buffer_size(hw);

        hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2);
        hw->fc.low_water = hw->fc.high_water - 1500;
        hw->fc.pause_time = IGB_FC_PAUSE_TIME;
        hw->fc.send_xon = 1;
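
        /*
         * Worked example (actual numbers depend on the Rx buffer size read
         * above): with a 32 KB Rx packet buffer, high_water =
         * 32768 - 2 * 1518 = 29732 bytes and low_water = 29732 - 1500 =
         * 28232 bytes, i.e. XOFF is sent while two full-size frames of
         * headroom remain and XON once about one frame has drained.
         */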
        /* Set Flow control, use the tunable location if sane */
        if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4))
                hw->fc.requested_mode = igb_fc_setting;
        else
                hw->fc.requested_mode = e1000_fc_none;

        /* Issue a global reset */
        igb_pf_reset_hw(hw);
        E1000_WRITE_REG(hw, E1000_WUC, 0);

        diag = e1000_init_hw(hw);
        if (diag < 0)
                return diag;

        E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
        e1000_get_phy_info(hw);
        e1000_check_for_link(hw);

        return 0;
}
/* This function is based on igb_update_stats_counters() in igb/if_igb.c */
static void
eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct e1000_hw_stats *stats =
                E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
        int pause_frames;

        if (hw->phy.media_type == e1000_media_type_copper ||
            (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
                stats->symerrs +=
                        E1000_READ_REG(hw, E1000_SYMERRS);
                stats->sec += E1000_READ_REG(hw, E1000_SEC);
        }

        stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
        stats->mpc += E1000_READ_REG(hw, E1000_MPC);
        stats->scc += E1000_READ_REG(hw, E1000_SCC);
        stats->ecol += E1000_READ_REG(hw, E1000_ECOL);

        stats->mcc += E1000_READ_REG(hw, E1000_MCC);
        stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
        stats->colc += E1000_READ_REG(hw, E1000_COLC);
        stats->dc += E1000_READ_REG(hw, E1000_DC);
        stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
        stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
        stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
        /*
         * For watchdog management we need to know if we have been
         * paused during the last interval, so capture that here.
         */
        pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
        stats->xoffrxc += pause_frames;
        stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
        stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
        stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
        stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
        stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
        stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
        stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
        stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
        stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
        stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
        stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
        stats->gptc += E1000_READ_REG(hw, E1000_GPTC);

        /* For the 64-bit byte counters the low dword must be read first. */
        /* Both registers clear on the read of the high dword */

        stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
        stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
        stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
        stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);

        stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
        stats->ruc += E1000_READ_REG(hw, E1000_RUC);
        stats->rfc += E1000_READ_REG(hw, E1000_RFC);
        stats->roc += E1000_READ_REG(hw, E1000_ROC);
        stats->rjc += E1000_READ_REG(hw, E1000_RJC);

        stats->tor += E1000_READ_REG(hw, E1000_TORH);
        stats->tot += E1000_READ_REG(hw, E1000_TOTH);

        stats->tpr += E1000_READ_REG(hw, E1000_TPR);
        stats->tpt += E1000_READ_REG(hw, E1000_TPT);
        stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
        stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
        stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
        stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
        stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
        stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
        stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
        stats->bptc += E1000_READ_REG(hw, E1000_BPTC);

        /* Interrupt Counts */

        stats->iac += E1000_READ_REG(hw, E1000_IAC);
        stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
        stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
        stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
        stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
        stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
        stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
        stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
        stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);

        /* Host to Card Statistics */

        stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
        stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
        stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
        stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
        stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
        stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
        stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
        stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL);
        stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32);
        stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL);
        stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32);
        stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
        stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
        stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);

        stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
        stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
        stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
        stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
        stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
        stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);

        if (rte_stats == NULL)
                return;

        /* Rx Errors */
        rte_stats->ibadcrc = stats->crcerrs;
        rte_stats->ibadlen = stats->rlec + stats->ruc + stats->roc;
        rte_stats->imissed = stats->mpc;
        rte_stats->ierrors = rte_stats->ibadcrc +
                             rte_stats->ibadlen +
                             rte_stats->imissed +
                             stats->rxerrc + stats->algnerrc + stats->cexterr;

        /* Tx Errors */
        rte_stats->oerrors = stats->ecol + stats->latecol;

        /* XON/XOFF pause frames */
        rte_stats->tx_pause_xon = stats->xontxc;
        rte_stats->rx_pause_xon = stats->xonrxc;
        rte_stats->tx_pause_xoff = stats->xofftxc;
        rte_stats->rx_pause_xoff = stats->xoffrxc;

        rte_stats->ipackets = stats->gprc;
        rte_stats->opackets = stats->gptc;
        rte_stats->ibytes = stats->gorc;
        rte_stats->obytes = stats->gotc;
}
static void
eth_igb_stats_reset(struct rte_eth_dev *dev)
{
        struct e1000_hw_stats *hw_stats =
                E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

        /* HW registers are cleared on read */
        eth_igb_stats_get(dev, NULL);

        /* Reset software totals */
        memset(hw_stats, 0, sizeof(*hw_stats));
}
static void
eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
                E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

        /* Good Rx packets, include VF loopback */
        UPDATE_VF_STAT(E1000_VFGPRC,
                hw_stats->last_gprc, hw_stats->gprc);

        /* Good Rx octets, include VF loopback */
        UPDATE_VF_STAT(E1000_VFGORC,
                hw_stats->last_gorc, hw_stats->gorc);

        /* Good Tx packets, include VF loopback */
        UPDATE_VF_STAT(E1000_VFGPTC,
                hw_stats->last_gptc, hw_stats->gptc);

        /* Good Tx octets, include VF loopback */
        UPDATE_VF_STAT(E1000_VFGOTC,
                hw_stats->last_gotc, hw_stats->gotc);

        /* Rx Multicast packets */
        UPDATE_VF_STAT(E1000_VFMPRC,
                hw_stats->last_mprc, hw_stats->mprc);

        /* Good Rx loopback packets */
        UPDATE_VF_STAT(E1000_VFGPRLBC,
                hw_stats->last_gprlbc, hw_stats->gprlbc);

        /* Good Rx loopback octets */
        UPDATE_VF_STAT(E1000_VFGORLBC,
                hw_stats->last_gorlbc, hw_stats->gorlbc);

        /* Good Tx loopback packets */
        UPDATE_VF_STAT(E1000_VFGPTLBC,
                hw_stats->last_gptlbc, hw_stats->gptlbc);

        /* Good Tx loopback octets */
        UPDATE_VF_STAT(E1000_VFGOTLBC,
                hw_stats->last_gotlbc, hw_stats->gotlbc);

        if (rte_stats == NULL)
                return;

        rte_stats->ipackets = hw_stats->gprc;
        rte_stats->ibytes = hw_stats->gorc;
        rte_stats->opackets = hw_stats->gptc;
        rte_stats->obytes = hw_stats->gotc;
        rte_stats->imcasts = hw_stats->mprc;
        rte_stats->ilbpackets = hw_stats->gprlbc;
        rte_stats->ilbbytes = hw_stats->gorlbc;
        rte_stats->olbpackets = hw_stats->gptlbc;
        rte_stats->olbbytes = hw_stats->gotlbc;
}
static void
eth_igbvf_stats_reset(struct rte_eth_dev *dev)
{
        struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
                E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

        /* Sync HW register to the last stats */
        eth_igbvf_stats_get(dev, NULL);

        /* reset HW current stats */
        memset(&hw_stats->gprc, 0, sizeof(*hw_stats) -
               offsetof(struct e1000_vf_stats, gprc));
}
static void
eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
        dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
        dev_info->max_mac_addrs = hw->mac.rar_entry_count;
        dev_info->rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM |
                DEV_RX_OFFLOAD_TCP_CKSUM;
        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_VLAN_INSERT |
                DEV_TX_OFFLOAD_IPV4_CKSUM |
                DEV_TX_OFFLOAD_UDP_CKSUM |
                DEV_TX_OFFLOAD_TCP_CKSUM |
                DEV_TX_OFFLOAD_SCTP_CKSUM;

        switch (hw->mac.type) {
        case e1000_82575:
                dev_info->max_rx_queues = 4;
                dev_info->max_tx_queues = 4;
                dev_info->max_vmdq_pools = 0;
                break;

        case e1000_82576:
                dev_info->max_rx_queues = 16;
                dev_info->max_tx_queues = 16;
                dev_info->max_vmdq_pools = ETH_8_POOLS;
                dev_info->vmdq_queue_num = 16;
                break;

        case e1000_82580:
                dev_info->max_rx_queues = 8;
                dev_info->max_tx_queues = 8;
                dev_info->max_vmdq_pools = ETH_8_POOLS;
                dev_info->vmdq_queue_num = 8;
                break;

        case e1000_i350:
                dev_info->max_rx_queues = 8;
                dev_info->max_tx_queues = 8;
                dev_info->max_vmdq_pools = ETH_8_POOLS;
                dev_info->vmdq_queue_num = 8;
                break;

        case e1000_i354:
                dev_info->max_rx_queues = 8;
                dev_info->max_tx_queues = 8;
                break;

        case e1000_i210:
                dev_info->max_rx_queues = 4;
                dev_info->max_tx_queues = 4;
                dev_info->max_vmdq_pools = 0;
                break;

        case e1000_i211:
                dev_info->max_rx_queues = 2;
                dev_info->max_tx_queues = 2;
                dev_info->max_vmdq_pools = 0;
                break;

        default:
                /* Should not happen */
                break;
        }
        dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t);
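        /* i.e. 10 key registers x 4 bytes each = a 40-byte RSS hash key */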
        dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
        dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL;

        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = IGB_DEFAULT_RX_PTHRESH,
                        .hthresh = IGB_DEFAULT_RX_HTHRESH,
                        .wthresh = IGB_DEFAULT_RX_WTHRESH,
                },
                .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
                .rx_drop_en = 0,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_thresh = {
                        .pthresh = IGB_DEFAULT_TX_PTHRESH,
                        .hthresh = IGB_DEFAULT_TX_HTHRESH,
                        .wthresh = IGB_DEFAULT_TX_WTHRESH,
                },
                .txq_flags = 0,
        };
}
static void
eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
        dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
        dev_info->max_mac_addrs = hw->mac.rar_entry_count;
        dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
                                DEV_RX_OFFLOAD_IPV4_CKSUM |
                                DEV_RX_OFFLOAD_UDP_CKSUM |
                                DEV_RX_OFFLOAD_TCP_CKSUM;
        dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
                                DEV_TX_OFFLOAD_IPV4_CKSUM |
                                DEV_TX_OFFLOAD_UDP_CKSUM |
                                DEV_TX_OFFLOAD_TCP_CKSUM |
                                DEV_TX_OFFLOAD_SCTP_CKSUM;
        switch (hw->mac.type) {
        case e1000_vfadapt:
                dev_info->max_rx_queues = 2;
                dev_info->max_tx_queues = 2;
                break;
        case e1000_vfadapt_i350:
                dev_info->max_rx_queues = 1;
                dev_info->max_tx_queues = 1;
                break;
        default:
                /* Should not happen */
                break;
        }

        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = IGB_DEFAULT_RX_PTHRESH,
                        .hthresh = IGB_DEFAULT_RX_HTHRESH,
                        .wthresh = IGB_DEFAULT_RX_WTHRESH,
                },
                .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
                .rx_drop_en = 0,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_thresh = {
                        .pthresh = IGB_DEFAULT_TX_PTHRESH,
                        .hthresh = IGB_DEFAULT_TX_HTHRESH,
                        .wthresh = IGB_DEFAULT_TX_WTHRESH,
                },
                .txq_flags = 0,
        };
}
/* return 0 means link status changed, -1 means not changed */
static int
eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_link link, old;
        int link_check, count;

        link_check = 0;
        hw->mac.get_link_status = 1;

        /* possible wait-to-complete in up to 9 seconds */
        for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count++) {
                /* Read the real link status */
                switch (hw->phy.media_type) {
                case e1000_media_type_copper:
                        /* Do the work to read phy */
                        e1000_check_for_link(hw);
                        link_check = !hw->mac.get_link_status;
                        break;

                case e1000_media_type_fiber:
                        e1000_check_for_link(hw);
                        link_check = (E1000_READ_REG(hw, E1000_STATUS) &
                                      E1000_STATUS_LU);
                        break;

                case e1000_media_type_internal_serdes:
                        e1000_check_for_link(hw);
                        link_check = hw->mac.serdes_has_link;
                        break;

                /* VF device is type_unknown */
                case e1000_media_type_unknown:
                        eth_igbvf_link_update(hw);
                        link_check = !hw->mac.get_link_status;
                        break;

                default:
                        break;
                }
                if (link_check || wait_to_complete == 0)
                        break;
                rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL);
        }
        memset(&link, 0, sizeof(link));
        rte_igb_dev_atomic_read_link_status(dev, &link);
        old = link;

        /* Now we check if a transition has happened */
        if (link_check) {
                hw->mac.ops.get_link_up_info(hw, &link.link_speed,
                                             &link.link_duplex);
                link.link_status = 1;
        } else if (!link_check) {
                link.link_speed = 0;
                link.link_duplex = 0;
                link.link_status = 0;
        }
        rte_igb_dev_atomic_write_link_status(dev, &link);

        /* not changed */
        if (old.link_status == link.link_status)
                return -1;

        /* changed */
        return 0;
}
/*
 * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means
 * that the driver is loaded.
 */
static void
igb_hw_control_acquire(struct e1000_hw *hw)
{
        uint32_t ctrl_ext;

        /* Let firmware know the driver has taken over */
        ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
        E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/*
 * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void
igb_hw_control_release(struct e1000_hw *hw)
{
        uint32_t ctrl_ext;

        /* Let firmware take over control of h/w */
        ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
        E1000_WRITE_REG(hw, E1000_CTRL_EXT,
                        ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}
/*
 * Bit of a misnomer, what this really means is
 * to enable OS management of the system... aka
 * to disable special hardware management features.
 */
static void
igb_init_manageability(struct e1000_hw *hw)
{
        if (e1000_enable_mng_pass_thru(hw)) {
                uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
                uint32_t manc = E1000_READ_REG(hw, E1000_MANC);

                /* disable hardware interception of ARP */
                manc &= ~(E1000_MANC_ARP_EN);

                /* enable receiving management packets to the host */
                manc |= E1000_MANC_EN_MNG2HOST;
                manc2h |= 1 << 5; /* Mng Port 623 */
                manc2h |= 1 << 6; /* Mng Port 664 */
                E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
                E1000_WRITE_REG(hw, E1000_MANC, manc);
        }
}

static void
igb_release_manageability(struct e1000_hw *hw)
{
        if (e1000_enable_mng_pass_thru(hw)) {
                uint32_t manc = E1000_READ_REG(hw, E1000_MANC);

                /* re-enable hardware interception of ARP */
                manc |= E1000_MANC_ARP_EN;
                manc &= ~E1000_MANC_EN_MNG2HOST;

                E1000_WRITE_REG(hw, E1000_MANC, manc);
        }
}
static void
eth_igb_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t rctl;

        rctl = E1000_READ_REG(hw, E1000_RCTL);
        rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
        E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

static void
eth_igb_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t rctl;

        rctl = E1000_READ_REG(hw, E1000_RCTL);
        rctl &= (~E1000_RCTL_UPE);
        if (dev->data->all_multicast == 1)
                rctl |= E1000_RCTL_MPE;
        else
                rctl &= (~E1000_RCTL_MPE);
        E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

static void
eth_igb_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t rctl;

        rctl = E1000_READ_REG(hw, E1000_RCTL);
        rctl |= E1000_RCTL_MPE;
        E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

static void
eth_igb_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t rctl;

        if (dev->data->promiscuous == 1)
                return; /* must remain in all_multicast mode */
        rctl = E1000_READ_REG(hw, E1000_RCTL);
        rctl &= (~E1000_RCTL_MPE);
        E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}
static int
eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct e1000_vfta *shadow_vfta =
                E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
        uint32_t vfta;
        uint32_t vid_idx;
        uint32_t vid_bit;

        vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
                              E1000_VFTA_ENTRY_MASK);
        vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
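        /*
         * Example: vlan_id 100 gives vid_idx = (100 >> 5) & 0x7F = 3 and
         * vid_bit = 1 << (100 & 0x1F) = 1 << 4, i.e. bit 4 of the fourth
         * 32-bit VFTA entry tracks VLAN 100.
         */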
        vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
        if (on)
                vfta |= vid_bit;
        else
                vfta &= ~vid_bit;
        E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);

        /* update local VFTA copy */
        shadow_vfta->vfta[vid_idx] = vfta;

        return 0;
}
static void
eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid)
{
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t reg = ETHER_TYPE_VLAN;

        reg |= (tpid << 16);
        E1000_WRITE_REG(hw, E1000_VET, reg);
}
static void
igb_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t reg;

        /* Filter Table Disable */
        reg = E1000_READ_REG(hw, E1000_RCTL);
        reg &= ~E1000_RCTL_CFIEN;
        reg &= ~E1000_RCTL_VFE;
        E1000_WRITE_REG(hw, E1000_RCTL, reg);
}

static void
igb_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct e1000_vfta *shadow_vfta =
                E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
        uint32_t reg;
        int i;

        /* Filter Table Enable, CFI not used for packet acceptance */
        reg = E1000_READ_REG(hw, E1000_RCTL);
        reg &= ~E1000_RCTL_CFIEN;
        reg |= E1000_RCTL_VFE;
        E1000_WRITE_REG(hw, E1000_RCTL, reg);

        /* restore VFTA table */
        for (i = 0; i < IGB_VFTA_SIZE; i++)
                E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
}
static void
igb_vlan_hw_strip_disable(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t reg;

        /* VLAN Mode Disable */
        reg = E1000_READ_REG(hw, E1000_CTRL);
        reg &= ~E1000_CTRL_VME;
        E1000_WRITE_REG(hw, E1000_CTRL, reg);
}

static void
igb_vlan_hw_strip_enable(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t reg;

        /* VLAN Mode Enable */
        reg = E1000_READ_REG(hw, E1000_CTRL);
        reg |= E1000_CTRL_VME;
        E1000_WRITE_REG(hw, E1000_CTRL, reg);
}
static void
igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t reg;

        /* CTRL_EXT: Extended VLAN */
        reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
        reg &= ~E1000_CTRL_EXT_EXTEND_VLAN;
        E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);

        /* Update maximum packet length */
        if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
                E1000_WRITE_REG(hw, E1000_RLPML,
                        dev->data->dev_conf.rxmode.max_rx_pkt_len +
                        VLAN_TAG_SIZE);
}

static void
igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t reg;

        /* CTRL_EXT: Extended VLAN */
        reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
        reg |= E1000_CTRL_EXT_EXTEND_VLAN;
        E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);

        /* Update maximum packet length */
        if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
                E1000_WRITE_REG(hw, E1000_RLPML,
                        dev->data->dev_conf.rxmode.max_rx_pkt_len +
                        2 * VLAN_TAG_SIZE);
}
1973 eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1975 if (mask & ETH_VLAN_STRIP_MASK) {
1976 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1977 igb_vlan_hw_strip_enable(dev);
1979 igb_vlan_hw_strip_disable(dev);
1982 if (mask & ETH_VLAN_FILTER_MASK) {
1983 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1984 igb_vlan_hw_filter_enable(dev);
1986 igb_vlan_hw_filter_disable(dev);
1989 if (mask & ETH_VLAN_EXTEND_MASK) {
1990 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1991 igb_vlan_hw_extend_enable(dev);
1993 igb_vlan_hw_extend_disable(dev);
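/*
 * Application-side sketch (hypothetical usage, not part of this driver):
 * the generic ethdev layer translates the requested offloads into the
 * ETH_VLAN_*_MASK bits handled above.
 *
 *   rte_eth_dev_set_vlan_offload(port_id,
 *           ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD);
 */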
1999 * It enables the interrupt mask and then enables the interrupt.
2002 * Pointer to struct rte_eth_dev.
2005 * - On success, zero.
2006 * - On failure, a negative value.
2009 eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
2011 struct e1000_interrupt *intr =
2012 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2014 intr->mask |= E1000_ICR_LSC;
2020 /* It clears the interrupt causes and enables the interrupt.
2021 * It will be called only once during NIC initialization.
2024 * Pointer to struct rte_eth_dev.
2027 * - On success, zero.
2028 * - On failure, a negative value.
2030 static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev)
2032 uint32_t mask, regval;
2033 struct e1000_hw *hw =
2034 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2035 struct rte_eth_dev_info dev_info;
2037 memset(&dev_info, 0, sizeof(dev_info));
2038 eth_igb_infos_get(dev, &dev_info);
2040 mask = 0xFFFFFFFF >> (32 - dev_info.max_rx_queues);
2041 regval = E1000_READ_REG(hw, E1000_EIMS);
2042 E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
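/*
 * Worked example: with max_rx_queues == 8, the queue-vector mask is
 * 0xFFFFFFFF >> (32 - 8) == 0xFF, so the low eight EIMS bits enable the
 * per-queue interrupts.
 */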
2049 * It reads the ICR register to get the interrupt causes, checks them,
2050 * and sets a bit flag to request a link status update.
2053 * Pointer to struct rte_eth_dev.
2056 * - On success, zero.
2057 * - On failure, a negative value.
2060 eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
2063 struct e1000_hw *hw =
2064 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2065 struct e1000_interrupt *intr =
2066 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2068 igb_intr_disable(hw);
2070 /* read-on-clear nic registers here */
2071 icr = E1000_READ_REG(hw, E1000_ICR);
2074 if (icr & E1000_ICR_LSC) {
2075 intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
2078 if (icr & E1000_ICR_VMMB)
2079 intr->flags |= E1000_FLAG_MAILBOX;
2085 * It executes link_update after knowing an interrupt is present.
2088 * Pointer to struct rte_eth_dev.
2091 * - On success, zero.
2092 * - On failure, a negative value.
2095 eth_igb_interrupt_action(struct rte_eth_dev *dev)
2097 struct e1000_hw *hw =
2098 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2099 struct e1000_interrupt *intr =
2100 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2101 uint32_t tctl, rctl;
2102 struct rte_eth_link link;
2105 if (intr->flags & E1000_FLAG_MAILBOX) {
2106 igb_pf_mbx_process(dev);
2107 intr->flags &= ~E1000_FLAG_MAILBOX;
2110 igb_intr_enable(dev);
2111 rte_intr_enable(&(dev->pci_dev->intr_handle));
2113 if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
2114 intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
2116 /* set get_link_status to check register later */
2117 hw->mac.get_link_status = 1;
2118 ret = eth_igb_link_update(dev, 0);
2120 /* check if link has changed */
2124 memset(&link, 0, sizeof(link));
2125 rte_igb_dev_atomic_read_link_status(dev, &link);
2126 if (link.link_status) {
2128 " Port %d: Link Up - speed %u Mbps - %s",
2130 (unsigned)link.link_speed,
2131 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
2132 "full-duplex" : "half-duplex");
2134 PMD_INIT_LOG(INFO, " Port %d: Link Down",
2135 dev->data->port_id);
2138 PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
2139 dev->pci_dev->addr.domain,
2140 dev->pci_dev->addr.bus,
2141 dev->pci_dev->addr.devid,
2142 dev->pci_dev->addr.function);
2143 tctl = E1000_READ_REG(hw, E1000_TCTL);
2144 rctl = E1000_READ_REG(hw, E1000_RCTL);
2145 if (link.link_status) {
2147 tctl |= E1000_TCTL_EN;
2148 rctl |= E1000_RCTL_EN;
2151 tctl &= ~E1000_TCTL_EN;
2152 rctl &= ~E1000_RCTL_EN;
2154 E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2155 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2156 E1000_WRITE_FLUSH(hw);
2157 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
2164 * Interrupt handler, which shall be registered first.
2167 * Pointer to interrupt handle.
2169 * The address of the parameter (struct rte_eth_dev *) registered before.
2175 eth_igb_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
2178 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2180 eth_igb_interrupt_get_status(dev);
2181 eth_igb_interrupt_action(dev);
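/*
 * Sketch of how this handler is typically hooked up at init time
 * (illustrative; variable names are placeholders):
 *
 *   rte_intr_callback_register(&pci_dev->intr_handle,
 *           eth_igb_interrupt_handler, (void *)eth_dev);
 */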
2185 eth_igb_led_on(struct rte_eth_dev *dev)
2187 struct e1000_hw *hw;
2189 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2190 return (e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
2194 eth_igb_led_off(struct rte_eth_dev *dev)
2196 struct e1000_hw *hw;
2198 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2199 return (e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
2203 eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2205 struct e1000_hw *hw;
2210 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2211 fc_conf->pause_time = hw->fc.pause_time;
2212 fc_conf->high_water = hw->fc.high_water;
2213 fc_conf->low_water = hw->fc.low_water;
2214 fc_conf->send_xon = hw->fc.send_xon;
2215 fc_conf->autoneg = hw->mac.autoneg;
2218 * Return rx_pause and tx_pause status according to actual setting of
2219 * the TFCE and RFCE bits in the CTRL register.
2221 ctrl = E1000_READ_REG(hw, E1000_CTRL);
2222 if (ctrl & E1000_CTRL_TFCE)
2227 if (ctrl & E1000_CTRL_RFCE)
2232 if (rx_pause && tx_pause)
2233 fc_conf->mode = RTE_FC_FULL;
2235 fc_conf->mode = RTE_FC_RX_PAUSE;
2237 fc_conf->mode = RTE_FC_TX_PAUSE;
2239 fc_conf->mode = RTE_FC_NONE;
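/*
 * Summary of the mode resolution above:
 *
 *   RFCE | TFCE | fc_conf->mode
 *   -----+------+-----------------
 *     1  |  1   | RTE_FC_FULL
 *     1  |  0   | RTE_FC_RX_PAUSE
 *     0  |  1   | RTE_FC_TX_PAUSE
 *     0  |  0   | RTE_FC_NONE
 */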
2245 eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2247 struct e1000_hw *hw;
2249 enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
2255 uint32_t rx_buf_size;
2256 uint32_t max_high_water;
2259 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2260 if (fc_conf->autoneg != hw->mac.autoneg)
2262 rx_buf_size = igb_get_rx_buffer_size(hw);
2263 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2265 /* At least reserve one Ethernet frame for watermark */
2266 max_high_water = rx_buf_size - ETHER_MAX_LEN;
2267 if ((fc_conf->high_water > max_high_water) ||
2268 (fc_conf->high_water < fc_conf->low_water)) {
2269 PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
2270 PMD_INIT_LOG(ERR, "high water must be <= 0x%x", max_high_water);
2274 hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
2275 hw->fc.pause_time = fc_conf->pause_time;
2276 hw->fc.high_water = fc_conf->high_water;
2277 hw->fc.low_water = fc_conf->low_water;
2278 hw->fc.send_xon = fc_conf->send_xon;
2280 err = e1000_setup_link_generic(hw);
2281 if (err == E1000_SUCCESS) {
2283 /* check if we want to forward MAC frames - driver doesn't have native
2284 * capability to do that, so we'll write the registers ourselves */
2286 rctl = E1000_READ_REG(hw, E1000_RCTL);
2288 /* set or clear RCTL.PMCF bit depending on configuration */
2289 if (fc_conf->mac_ctrl_frame_fwd != 0)
2290 rctl |= E1000_RCTL_PMCF;
2292 rctl &= ~E1000_RCTL_PMCF;
2294 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2295 E1000_WRITE_FLUSH(hw);
2300 PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
2304 #define E1000_RAH_POOLSEL_SHIFT (18)
2306 eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
2307 uint32_t index, __rte_unused uint32_t pool)
2309 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2312 e1000_rar_set(hw, mac_addr->addr_bytes, index);
2313 rah = E1000_READ_REG(hw, E1000_RAH(index));
2314 rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool));
2315 E1000_WRITE_REG(hw, E1000_RAH(index), rah);
2319 eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
2321 uint8_t addr[ETHER_ADDR_LEN];
2322 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2324 memset(addr, 0, sizeof(addr));
2326 e1000_rar_set(hw, addr, index);
2330 eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
2331 struct ether_addr *addr)
2333 eth_igb_rar_clear(dev, 0);
2335 eth_igb_rar_set(dev, (void *)addr, 0, 0);
2338 * Virtual Function operations
2341 igbvf_intr_disable(struct e1000_hw *hw)
2343 PMD_INIT_FUNC_TRACE();
2345 /* Clear interrupt mask to stop interrupts from being generated */
2346 E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF);
2348 E1000_WRITE_FLUSH(hw);
2352 igbvf_stop_adapter(struct rte_eth_dev *dev)
2356 struct rte_eth_dev_info dev_info;
2357 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2359 memset(&dev_info, 0, sizeof(dev_info));
2360 eth_igbvf_infos_get(dev, &dev_info);
2362 /* Clear interrupt mask to stop interrupts from being generated */
2363 igbvf_intr_disable(hw);
2365 /* Clear any pending interrupts, flush previous writes */
2366 E1000_READ_REG(hw, E1000_EICR);
2368 /* Disable the transmit unit. Each queue must be disabled. */
2369 for (i = 0; i < dev_info.max_tx_queues; i++)
2370 E1000_WRITE_REG(hw, E1000_TXDCTL(i), E1000_TXDCTL_SWFLSH);
2372 /* Disable the receive unit by stopping each queue */
2373 for (i = 0; i < dev_info.max_rx_queues; i++) {
2374 reg_val = E1000_READ_REG(hw, E1000_RXDCTL(i));
2375 reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE;
2376 E1000_WRITE_REG(hw, E1000_RXDCTL(i), reg_val);
2377 while (E1000_READ_REG(hw, E1000_RXDCTL(i)) & E1000_RXDCTL_QUEUE_ENABLE)
2381 /* flush all queue disables */
2382 E1000_WRITE_FLUSH(hw);
2386 static int eth_igbvf_link_update(struct e1000_hw *hw)
2388 struct e1000_mbx_info *mbx = &hw->mbx;
2389 struct e1000_mac_info *mac = &hw->mac;
2390 int ret_val = E1000_SUCCESS;
2392 PMD_INIT_LOG(DEBUG, "e1000_check_for_link_vf");
2395 * We only want to run this if a reset has been asserted.
2396 * In this case that could mean a link change, device reset,
2397 * or a virtual function reset.
2400 /* If we were hit with a reset or timeout drop the link */
2401 if (!e1000_check_for_rst(hw, 0) || !mbx->timeout)
2402 mac->get_link_status = TRUE;
2404 if (!mac->get_link_status)
2407 /* if link status is down, no point in checking whether the PF is up */
2408 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
2411 /* if we passed all the tests above then the link is up and we no
2412 * longer need to check for link */
2413 mac->get_link_status = FALSE;
2421 igbvf_dev_configure(struct rte_eth_dev *dev)
2423 struct rte_eth_conf *conf = &dev->data->dev_conf;
2425 PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
2426 dev->data->port_id);
2429 * The VF has no ability to enable/disable HW CRC stripping;
2430 * keep its behavior consistent with the host PF.
2432 #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
2433 if (!conf->rxmode.hw_strip_crc) {
2434 PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip");
2435 conf->rxmode.hw_strip_crc = 1;
2438 if (conf->rxmode.hw_strip_crc) {
2439 PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip");
2440 conf->rxmode.hw_strip_crc = 0;
2448 igbvf_dev_start(struct rte_eth_dev *dev)
2450 struct e1000_hw *hw =
2451 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2452 struct e1000_adapter *adapter =
2453 E1000_DEV_PRIVATE(dev->data->dev_private);
2456 PMD_INIT_FUNC_TRACE();
2458 hw->mac.ops.reset_hw(hw);
2459 adapter->stopped = 0;
2462 igbvf_set_vfta_all(dev, 1);
2464 eth_igbvf_tx_init(dev);
2466 /* This can fail when allocating mbufs for descriptor rings */
2467 ret = eth_igbvf_rx_init(dev);
2469 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
2470 igb_dev_clear_queues(dev);
2478 igbvf_dev_stop(struct rte_eth_dev *dev)
2480 PMD_INIT_FUNC_TRACE();
2482 igbvf_stop_adapter(dev);
2485 * Clear what we set, but keep shadow_vfta so it can be
2486 * restored after the device restarts
2488 igbvf_set_vfta_all(dev, 0);
2490 igb_dev_clear_queues(dev);
2494 igbvf_dev_close(struct rte_eth_dev *dev)
2496 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2497 struct e1000_adapter *adapter =
2498 E1000_DEV_PRIVATE(dev->data->dev_private);
2500 PMD_INIT_FUNC_TRACE();
2504 igbvf_dev_stop(dev);
2505 adapter->stopped = 1;
2506 igb_dev_free_queues(dev);
2509 static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on)
2511 struct e1000_mbx_info *mbx = &hw->mbx;
2514 /* After setting a VLAN, VLAN stripping will also be enabled in the igb driver */
2515 msgbuf[0] = E1000_VF_SET_VLAN;
2517 /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
2519 msgbuf[0] |= E1000_VF_SET_VLAN_ADD;
2521 return (mbx->ops.write_posted(hw, msgbuf, 2, 0));
2524 static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on)
2526 struct e1000_hw *hw =
2527 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2528 struct e1000_vfta *shadow_vfta =
2529 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2530 int i = 0, j = 0, vfta = 0, mask = 1;
2532 for (i = 0; i < IGB_VFTA_SIZE; i++) {
2533 vfta = shadow_vfta->vfta[i];
2536 for (j = 0; j < 32; j++) {
2539 (uint16_t)((i << 5) + j), on);
2548 igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2550 struct e1000_hw *hw =
2551 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2552 struct e1000_vfta *shadow_vfta =
2553 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2554 uint32_t vid_idx = 0;
2555 uint32_t vid_bit = 0;
2558 PMD_INIT_FUNC_TRACE();
2560 /* vind is not used in the VF driver; set it to 0 (see ixgbe_set_vfta_vf) */
2561 ret = igbvf_set_vfta(hw, vlan_id, !!on);
2563 PMD_INIT_LOG(ERR, "Unable to set VF vlan");
2566 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
2567 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
2569 /* Save what we set and restore it after device reset */
2571 shadow_vfta->vfta[vid_idx] |= vid_bit;
2573 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
2579 igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr)
2581 struct e1000_hw *hw =
2582 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2584 /* index is not used by rar_set() */
2585 hw->mac.ops.rar_set(hw, (void *)addr, 0);
2590 eth_igb_rss_reta_update(struct rte_eth_dev *dev,
2591 struct rte_eth_rss_reta_entry64 *reta_conf,
2596 uint16_t idx, shift;
2597 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2599 if (reta_size != ETH_RSS_RETA_SIZE_128) {
2600 PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
2601 "(%d) doesn't match the number supported by hardware "
2602 "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
2606 for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
2607 idx = i / RTE_RETA_GROUP_SIZE;
2608 shift = i % RTE_RETA_GROUP_SIZE;
2609 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2613 if (mask == IGB_4_BIT_MASK)
2616 r = E1000_READ_REG(hw, E1000_RETA(i >> 2));
2617 for (j = 0, reta = 0; j < IGB_4_BIT_WIDTH; j++) {
2618 if (mask & (0x1 << j))
2619 reta |= reta_conf[idx].reta[shift + j] <<
2622 reta |= r & (IGB_8_BIT_MASK << (CHAR_BIT * j));
2624 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta);
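/*
 * Worked example: entries are written four at a time (IGB_4_BIT_WIDTH),
 * so iteration i == 20 targets RETA register 20 >> 2 == 5; with a
 * fully-set 4-bit mask (IGB_4_BIT_MASK) the old register value need not
 * be read back before the write.
 */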
2631 eth_igb_rss_reta_query(struct rte_eth_dev *dev,
2632 struct rte_eth_rss_reta_entry64 *reta_conf,
2637 uint16_t idx, shift;
2638 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2640 if (reta_size != ETH_RSS_RETA_SIZE_128) {
2641 PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
2642 "(%d) doesn't match the number supported by hardware "
2643 "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
2647 for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
2648 idx = i / RTE_RETA_GROUP_SIZE;
2649 shift = i % RTE_RETA_GROUP_SIZE;
2650 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2654 reta = E1000_READ_REG(hw, E1000_RETA(i >> 2));
2655 for (j = 0; j < IGB_4_BIT_WIDTH; j++) {
2656 if (mask & (0x1 << j))
2657 reta_conf[idx].reta[shift + j] =
2658 ((reta >> (CHAR_BIT * j)) &
2666 #define MAC_TYPE_FILTER_SUP(type) do {\
2667 if ((type) != e1000_82580 && (type) != e1000_i350 &&\
2668 (type) != e1000_82576)\
2673 eth_igb_syn_filter_set(struct rte_eth_dev *dev,
2674 struct rte_eth_syn_filter *filter,
2677 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2678 uint32_t synqf, rfctl;
2680 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
2683 synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
2686 if (synqf & E1000_SYN_FILTER_ENABLE)
2689 synqf = (uint32_t)(((filter->queue << E1000_SYN_FILTER_QUEUE_SHIFT) &
2690 E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE);
2692 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
2693 if (filter->hig_pri)
2694 rfctl |= E1000_RFCTL_SYNQFP;
2696 rfctl &= ~E1000_RFCTL_SYNQFP;
2698 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
2700 if (!(synqf & E1000_SYN_FILTER_ENABLE))
2705 E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
2706 E1000_WRITE_FLUSH(hw);
2711 eth_igb_syn_filter_get(struct rte_eth_dev *dev,
2712 struct rte_eth_syn_filter *filter)
2714 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2715 uint32_t synqf, rfctl;
2717 synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
2718 if (synqf & E1000_SYN_FILTER_ENABLE) {
2719 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
2720 filter->hig_pri = (rfctl & E1000_RFCTL_SYNQFP) ? 1 : 0;
2721 filter->queue = (uint8_t)((synqf & E1000_SYN_FILTER_QUEUE) >>
2722 E1000_SYN_FILTER_QUEUE_SHIFT);
2730 eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
2731 enum rte_filter_op filter_op,
2734 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2737 MAC_TYPE_FILTER_SUP(hw->mac.type);
2739 if (filter_op == RTE_ETH_FILTER_NOP)
2743 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
2748 switch (filter_op) {
2749 case RTE_ETH_FILTER_ADD:
2750 ret = eth_igb_syn_filter_set(dev,
2751 (struct rte_eth_syn_filter *)arg,
2754 case RTE_ETH_FILTER_DELETE:
2755 ret = eth_igb_syn_filter_set(dev,
2756 (struct rte_eth_syn_filter *)arg,
2759 case RTE_ETH_FILTER_GET:
2760 ret = eth_igb_syn_filter_get(dev,
2761 (struct rte_eth_syn_filter *)arg);
2764 PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
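/*
 * Application-side sketch (hypothetical usage): SYN filters are managed
 * through the generic filter API, which dispatches to the handler above.
 *
 *   struct rte_eth_syn_filter syn = { .hig_pri = 1, .queue = 3 };
 *
 *   rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_SYN,
 *           RTE_ETH_FILTER_ADD, &syn);
 */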
2772 #define MAC_TYPE_FILTER_SUP_EXT(type) do {\
2773 if ((type) != e1000_82580 && (type) != e1000_i350)\
2777 /* translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info */
2779 ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter,
2780 struct e1000_2tuple_filter_info *filter_info)
2782 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
2784 if (filter->priority > E1000_2TUPLE_MAX_PRI)
2785 return -EINVAL; /* priority is out of range. */
2786 if (filter->tcp_flags > TCP_FLAG_ALL)
2787 return -EINVAL; /* flags are invalid. */
2789 switch (filter->dst_port_mask) {
2791 filter_info->dst_port_mask = 0;
2792 filter_info->dst_port = filter->dst_port;
2795 filter_info->dst_port_mask = 1;
2798 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
2802 switch (filter->proto_mask) {
2804 filter_info->proto_mask = 0;
2805 filter_info->proto = filter->proto;
2808 filter_info->proto_mask = 1;
2811 PMD_DRV_LOG(ERR, "invalid protocol mask.");
2815 filter_info->priority = (uint8_t)filter->priority;
2816 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
2817 filter_info->tcp_flags = filter->tcp_flags;
2819 filter_info->tcp_flags = 0;
2824 static inline struct e1000_2tuple_filter *
2825 igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list,
2826 struct e1000_2tuple_filter_info *key)
2828 struct e1000_2tuple_filter *it;
2830 TAILQ_FOREACH(it, filter_list, entries) {
2831 if (memcmp(key, &it->filter_info,
2832 sizeof(struct e1000_2tuple_filter_info)) == 0) {
2840 * igb_add_2tuple_filter - add a 2tuple filter
2843 * dev: Pointer to struct rte_eth_dev.
2844 * ntuple_filter: pointer to the filter that will be added.
2847 * - On success, zero.
2848 * - On failure, a negative value.
2851 igb_add_2tuple_filter(struct rte_eth_dev *dev,
2852 struct rte_eth_ntuple_filter *ntuple_filter)
2854 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2855 struct e1000_filter_info *filter_info =
2856 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2857 struct e1000_2tuple_filter *filter;
2858 uint32_t ttqf = E1000_TTQF_DISABLE_MASK;
2859 uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP;
2862 filter = rte_zmalloc("e1000_2tuple_filter",
2863 sizeof(struct e1000_2tuple_filter), 0);
2867 ret = ntuple_filter_to_2tuple(ntuple_filter,
2868 &filter->filter_info);
2873 if (igb_2tuple_filter_lookup(&filter_info->twotuple_list,
2874 &filter->filter_info) != NULL) {
2875 PMD_DRV_LOG(ERR, "filter exists.");
2879 filter->queue = ntuple_filter->queue;
2882 * look for an unused 2tuple filter index,
2883 * and insert the filter into the list.
2885 for (i = 0; i < E1000_MAX_TTQF_FILTERS; i++) {
2886 if (!(filter_info->twotuple_mask & (1 << i))) {
2887 filter_info->twotuple_mask |= 1 << i;
2889 TAILQ_INSERT_TAIL(&filter_info->twotuple_list,
2895 if (i >= E1000_MAX_TTQF_FILTERS) {
2896 PMD_DRV_LOG(ERR, "2tuple filters are full.");
2901 imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
2902 if (filter->filter_info.dst_port_mask == 1) /* 1b means don't compare. */
2903 imir |= E1000_IMIR_PORT_BP;
2905 imir &= ~E1000_IMIR_PORT_BP;
2907 imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
2909 ttqf |= E1000_TTQF_QUEUE_ENABLE;
2910 ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT);
2911 ttqf |= (uint32_t)(filter->filter_info.proto & E1000_TTQF_PROTOCOL_MASK);
2912 if (filter->filter_info.proto_mask == 0)
2913 ttqf &= ~E1000_TTQF_MASK_ENABLE;
2915 /* tcp flags bits setting. */
2916 if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
2917 if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
2918 imir_ext |= E1000_IMIREXT_CTRL_URG;
2919 if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
2920 imir_ext |= E1000_IMIREXT_CTRL_ACK;
2921 if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
2922 imir_ext |= E1000_IMIREXT_CTRL_PSH;
2923 if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
2924 imir_ext |= E1000_IMIREXT_CTRL_RST;
2925 if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
2926 imir_ext |= E1000_IMIREXT_CTRL_SYN;
2927 if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
2928 imir_ext |= E1000_IMIREXT_CTRL_FIN;
2930 imir_ext |= E1000_IMIREXT_CTRL_BP;
2931 E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
2932 E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf);
2933 E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
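/*
 * Register roles for the 2-tuple filter programmed above: IMIR holds the
 * destination port and priority, TTQF carries the protocol match and the
 * target queue, and IMIREXT encodes the TCP flag checks.
 */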
2938 * igb_remove_2tuple_filter - remove a 2tuple filter
2941 * dev: Pointer to struct rte_eth_dev.
2942 * ntuple_filter: pointer to the filter that will be removed.
2945 * - On success, zero.
2946 * - On failure, a negative value.
2949 igb_remove_2tuple_filter(struct rte_eth_dev *dev,
2950 struct rte_eth_ntuple_filter *ntuple_filter)
2952 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2953 struct e1000_filter_info *filter_info =
2954 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2955 struct e1000_2tuple_filter_info filter_2tuple;
2956 struct e1000_2tuple_filter *filter;
2959 memset(&filter_2tuple, 0, sizeof(struct e1000_2tuple_filter_info));
2960 ret = ntuple_filter_to_2tuple(ntuple_filter,
2965 filter = igb_2tuple_filter_lookup(&filter_info->twotuple_list,
2967 if (filter == NULL) {
2968 PMD_DRV_LOG(ERR, "filter doesn't exist.");
2972 filter_info->twotuple_mask &= ~(1 << filter->index);
2973 TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries);
2976 E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK);
2977 E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
2978 E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
2982 static inline struct e1000_flex_filter *
2983 eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list,
2984 struct e1000_flex_filter_info *key)
2986 struct e1000_flex_filter *it;
2988 TAILQ_FOREACH(it, filter_list, entries) {
2989 if (memcmp(key, &it->filter_info,
2990 sizeof(struct e1000_flex_filter_info)) == 0)
2998 eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
2999 struct rte_eth_flex_filter *filter,
3002 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3003 struct e1000_filter_info *filter_info =
3004 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3005 struct e1000_flex_filter *flex_filter, *it;
3006 uint32_t wufc, queueing, mask;
3008 uint8_t shift, i, j = 0;
3010 flex_filter = rte_zmalloc("e1000_flex_filter",
3011 sizeof(struct e1000_flex_filter), 0);
3012 if (flex_filter == NULL)
3015 flex_filter->filter_info.len = filter->len;
3016 flex_filter->filter_info.priority = filter->priority;
3017 memcpy(flex_filter->filter_info.dwords, filter->bytes, filter->len);
3018 for (i = 0; i < RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT; i++) {
3020 /* reverse bits in flex filter's mask */
3021 for (shift = 0; shift < CHAR_BIT; shift++) {
3022 if (filter->mask[i] & (0x01 << shift))
3023 mask |= (0x80 >> shift);
3025 flex_filter->filter_info.mask[i] = mask;
3028 wufc = E1000_READ_REG(hw, E1000_WUFC);
3029 if (flex_filter->index < E1000_MAX_FHFT)
3030 reg_off = E1000_FHFT(flex_filter->index);
3032 reg_off = E1000_FHFT_EXT(flex_filter->index - E1000_MAX_FHFT);
3035 if (eth_igb_flex_filter_lookup(&filter_info->flex_list,
3036 &flex_filter->filter_info) != NULL) {
3037 PMD_DRV_LOG(ERR, "filter exists.");
3038 rte_free(flex_filter);
3041 flex_filter->queue = filter->queue;
3043 * look for an unused flex filter index
3044 * and insert the filter into the list.
3046 for (i = 0; i < E1000_MAX_FLEX_FILTERS; i++) {
3047 if (!(filter_info->flex_mask & (1 << i))) {
3048 filter_info->flex_mask |= 1 << i;
3049 flex_filter->index = i;
3050 TAILQ_INSERT_TAIL(&filter_info->flex_list,
3056 if (i >= E1000_MAX_FLEX_FILTERS) {
3057 PMD_DRV_LOG(ERR, "flex filters are full.");
3058 rte_free(flex_filter);
3062 E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
3063 (E1000_WUFC_FLX0 << flex_filter->index));
3064 queueing = filter->len |
3065 (filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
3066 (filter->priority << E1000_FHFT_QUEUEING_PRIO_SHIFT);
3067 E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET,
3069 for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) {
3070 E1000_WRITE_REG(hw, reg_off,
3071 flex_filter->filter_info.dwords[j]);
3072 reg_off += sizeof(uint32_t);
3073 E1000_WRITE_REG(hw, reg_off,
3074 flex_filter->filter_info.dwords[++j]);
3075 reg_off += sizeof(uint32_t);
3076 E1000_WRITE_REG(hw, reg_off,
3077 (uint32_t)flex_filter->filter_info.mask[i]);
3078 reg_off += sizeof(uint32_t) * 2;
3082 it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
3083 &flex_filter->filter_info);
3085 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3086 rte_free(flex_filter);
3090 for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
3091 E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);
3092 E1000_WRITE_REG(hw, E1000_WUFC, wufc &
3093 (~(E1000_WUFC_FLX0 << it->index)));
3095 filter_info->flex_mask &= ~(1 << it->index);
3096 TAILQ_REMOVE(&filter_info->flex_list, it, entries);
3098 rte_free(flex_filter);
3105 eth_igb_get_flex_filter(struct rte_eth_dev *dev,
3106 struct rte_eth_flex_filter *filter)
3108 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3109 struct e1000_filter_info *filter_info =
3110 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3111 struct e1000_flex_filter flex_filter, *it;
3112 uint32_t wufc, queueing, wufc_en = 0;
3114 memset(&flex_filter, 0, sizeof(struct e1000_flex_filter));
3115 flex_filter.filter_info.len = filter->len;
3116 flex_filter.filter_info.priority = filter->priority;
3117 memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len);
3118 memcpy(flex_filter.filter_info.mask, filter->mask,
3119 RTE_ALIGN(filter->len, sizeof(char)) / sizeof(char));
3121 it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
3122 &flex_filter.filter_info);
3124 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3128 wufc = E1000_READ_REG(hw, E1000_WUFC);
3129 wufc_en = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << it->index);
3131 if ((wufc & wufc_en) == wufc_en) {
3132 uint32_t reg_off = 0;
3133 if (it->index < E1000_MAX_FHFT)
3134 reg_off = E1000_FHFT(it->index);
3136 reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);
3138 queueing = E1000_READ_REG(hw,
3139 reg_off + E1000_FHFT_QUEUEING_OFFSET);
3140 filter->len = queueing & E1000_FHFT_QUEUEING_LEN;
3141 filter->priority = (queueing & E1000_FHFT_QUEUEING_PRIO) >>
3142 E1000_FHFT_QUEUEING_PRIO_SHIFT;
3143 filter->queue = (queueing & E1000_FHFT_QUEUEING_QUEUE) >>
3144 E1000_FHFT_QUEUEING_QUEUE_SHIFT;
3151 eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
3152 enum rte_filter_op filter_op,
3155 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3156 struct rte_eth_flex_filter *filter;
3159 MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
3161 if (filter_op == RTE_ETH_FILTER_NOP)
3165 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
3170 filter = (struct rte_eth_flex_filter *)arg;
3171 if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN
3172 || filter->len % sizeof(uint64_t) != 0) {
3173 PMD_DRV_LOG(ERR, "filter's length is out of range");
3176 if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
3177 PMD_DRV_LOG(ERR, "filter's priority is out of range");
3181 switch (filter_op) {
3182 case RTE_ETH_FILTER_ADD:
3183 ret = eth_igb_add_del_flex_filter(dev, filter, TRUE);
3185 case RTE_ETH_FILTER_DELETE:
3186 ret = eth_igb_add_del_flex_filter(dev, filter, FALSE);
3188 case RTE_ETH_FILTER_GET:
3189 ret = eth_igb_get_flex_filter(dev, filter);
3192 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
3200 /* translate elements in struct rte_eth_ntuple_filter to struct e1000_5tuple_filter_info */
3202 ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter,
3203 struct e1000_5tuple_filter_info *filter_info)
3205 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576)
3207 if (filter->priority > E1000_2TUPLE_MAX_PRI)
3208 return -EINVAL; /* priority is out of range. */
3209 if (filter->tcp_flags > TCP_FLAG_ALL)
3210 return -EINVAL; /* flags are invalid. */
3212 switch (filter->dst_ip_mask) {
3214 filter_info->dst_ip_mask = 0;
3215 filter_info->dst_ip = filter->dst_ip;
3218 filter_info->dst_ip_mask = 1;
3221 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
3225 switch (filter->src_ip_mask) {
3227 filter_info->src_ip_mask = 0;
3228 filter_info->src_ip = filter->src_ip;
3231 filter_info->src_ip_mask = 1;
3234 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
3238 switch (filter->dst_port_mask) {
3240 filter_info->dst_port_mask = 0;
3241 filter_info->dst_port = filter->dst_port;
3244 filter_info->dst_port_mask = 1;
3247 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
3251 switch (filter->src_port_mask) {
3253 filter_info->src_port_mask = 0;
3254 filter_info->src_port = filter->src_port;
3257 filter_info->src_port_mask = 1;
3260 PMD_DRV_LOG(ERR, "invalid src_port mask.");
3264 switch (filter->proto_mask) {
3266 filter_info->proto_mask = 0;
3267 filter_info->proto = filter->proto;
3270 filter_info->proto_mask = 1;
3273 PMD_DRV_LOG(ERR, "invalid protocol mask.");
3277 filter_info->priority = (uint8_t)filter->priority;
3278 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
3279 filter_info->tcp_flags = filter->tcp_flags;
3281 filter_info->tcp_flags = 0;
3286 static inline struct e1000_5tuple_filter *
3287 igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list,
3288 struct e1000_5tuple_filter_info *key)
3290 struct e1000_5tuple_filter *it;
3292 TAILQ_FOREACH(it, filter_list, entries) {
3293 if (memcmp(key, &it->filter_info,
3294 sizeof(struct e1000_5tuple_filter_info)) == 0) {
3302 * igb_add_5tuple_filter_82576 - add a 5tuple filter
3305 * dev: Pointer to struct rte_eth_dev.
3306 * ntuple_filter: pointer to the filter that will be added.
3309 * - On success, zero.
3310 * - On failure, a negative value.
3313 igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
3314 struct rte_eth_ntuple_filter *ntuple_filter)
3316 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3317 struct e1000_filter_info *filter_info =
3318 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3319 struct e1000_5tuple_filter *filter;
3320 uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK;
3321 uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP;
3325 filter = rte_zmalloc("e1000_5tuple_filter",
3326 sizeof(struct e1000_5tuple_filter), 0);
3330 ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
3331 &filter->filter_info);
3337 if (igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
3338 &filter->filter_info) != NULL) {
3339 PMD_DRV_LOG(ERR, "filter exists.");
3343 filter->queue = ntuple_filter->queue;
3346 * look for an unused 5tuple filter index,
3347 * and insert the filter into the list.
3349 for (i = 0; i < E1000_MAX_FTQF_FILTERS; i++) {
3350 if (!(filter_info->fivetuple_mask & (1 << i))) {
3351 filter_info->fivetuple_mask |= 1 << i;
3353 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
3359 if (i >= E1000_MAX_FTQF_FILTERS) {
3360 PMD_DRV_LOG(ERR, "5tuple filters are full.");
3365 ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK;
3366 if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. */
3367 ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP;
3368 if (filter->filter_info.dst_ip_mask == 0)
3369 ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP;
3370 if (filter->filter_info.src_port_mask == 0)
3371 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
3372 if (filter->filter_info.proto_mask == 0)
3373 ftqf &= ~E1000_FTQF_MASK_PROTO_BP;
3374 ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) &
3375 E1000_FTQF_QUEUE_MASK;
3376 ftqf |= E1000_FTQF_QUEUE_ENABLE;
3377 E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf);
3378 E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip);
3379 E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip);
3381 spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT;
3382 E1000_WRITE_REG(hw, E1000_SPQF(i), spqf);
3384 imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
3385 if (filter->filter_info.dst_port_mask == 1) /* 1b means don't compare. */
3386 imir |= E1000_IMIR_PORT_BP;
3388 imir &= ~E1000_IMIR_PORT_BP;
3389 imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
3391 /* tcp flags bits setting. */
3392 if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
3393 if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
3394 imir_ext |= E1000_IMIREXT_CTRL_URG;
3395 if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
3396 imir_ext |= E1000_IMIREXT_CTRL_ACK;
3397 if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
3398 imir_ext |= E1000_IMIREXT_CTRL_PSH;
3399 if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
3400 imir_ext |= E1000_IMIREXT_CTRL_RST;
3401 if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
3402 imir_ext |= E1000_IMIREXT_CTRL_SYN;
3403 if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
3404 imir_ext |= E1000_IMIREXT_CTRL_FIN;
3406 imir_ext |= E1000_IMIREXT_CTRL_BP;
3407 E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
3408 E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
3413 * igb_remove_5tuple_filter_82576 - remove a 5tuple filter
3416 * dev: Pointer to struct rte_eth_dev.
3417 * ntuple_filter: pointer to the filter that will be removed.
3420 * - On success, zero.
3421 * - On failure, a negative value.
3424 igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
3425 struct rte_eth_ntuple_filter *ntuple_filter)
3427 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3428 struct e1000_filter_info *filter_info =
3429 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3430 struct e1000_5tuple_filter_info filter_5tuple;
3431 struct e1000_5tuple_filter *filter;
3434 memset(&filter_5tuple, 0, sizeof(struct e1000_5tuple_filter_info));
3435 ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
3440 filter = igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
3442 if (filter == NULL) {
3443 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3447 filter_info->fivetuple_mask &= ~(1 << filter->index);
3448 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
3451 E1000_WRITE_REG(hw, E1000_FTQF(filter->index),
3452 E1000_FTQF_VF_BP | E1000_FTQF_MASK);
3453 E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0);
3454 E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0);
3455 E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0);
3456 E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
3457 E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
3462 eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3465 struct e1000_hw *hw;
3466 struct rte_eth_dev_info dev_info;
3467 uint32_t frame_size = mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN +
3470 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3472 #ifdef RTE_LIBRTE_82571_SUPPORT
3473 /* XXX: not bigger than max_rx_pktlen */
3474 if (hw->mac.type == e1000_82571)
3477 eth_igb_infos_get(dev, &dev_info);
3479 /* check that mtu is within the allowed range */
3480 if ((mtu < ETHER_MIN_MTU) ||
3481 (frame_size > dev_info.max_rx_pktlen))
3484 /* refuse an MTU that requires scattered packet support when this
3485 * feature has not been enabled before. */
3486 if (!dev->data->scattered_rx &&
3487 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
3490 rctl = E1000_READ_REG(hw, E1000_RCTL);
3492 /* switch to jumbo mode if needed */
3493 if (frame_size > ETHER_MAX_LEN) {
3494 dev->data->dev_conf.rxmode.jumbo_frame = 1;
3495 rctl |= E1000_RCTL_LPE;
3497 dev->data->dev_conf.rxmode.jumbo_frame = 0;
3498 rctl &= ~E1000_RCTL_LPE;
3500 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
3502 /* update max frame size */
3503 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
3505 E1000_WRITE_REG(hw, E1000_RLPML,
3506 dev->data->dev_conf.rxmode.max_rx_pkt_len);
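/*
 * Application-side sketch (hypothetical usage): an MTU whose resulting
 * frame size exceeds ETHER_MAX_LEN switches the port into jumbo mode via
 * the code above.
 *
 *   rte_eth_dev_set_mtu(port_id, 9000);
 */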
3512 * igb_add_del_ntuple_filter - add or delete a ntuple filter
3515 * dev: Pointer to struct rte_eth_dev.
3516 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
3517 * add: if true, add filter, if false, remove filter
3520 * - On success, zero.
3521 * - On failure, a negative value.
3524 igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
3525 struct rte_eth_ntuple_filter *ntuple_filter,
3528 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3531 switch (ntuple_filter->flags) {
3532 case RTE_5TUPLE_FLAGS:
3533 case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
3534 if (hw->mac.type != e1000_82576)
3537 ret = igb_add_5tuple_filter_82576(dev,
3540 ret = igb_remove_5tuple_filter_82576(dev,
3543 case RTE_2TUPLE_FLAGS:
3544 case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
3545 if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
3548 ret = igb_add_2tuple_filter(dev, ntuple_filter);
3550 ret = igb_remove_2tuple_filter(dev, ntuple_filter);
3561 * igb_get_ntuple_filter - get a ntuple filter
3564 * dev: Pointer to struct rte_eth_dev.
3565 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
3568 * - On success, zero.
3569 * - On failure, a negative value.
3572 igb_get_ntuple_filter(struct rte_eth_dev *dev,
3573 struct rte_eth_ntuple_filter *ntuple_filter)
3575 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3576 struct e1000_filter_info *filter_info =
3577 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3578 struct e1000_5tuple_filter_info filter_5tuple;
3579 struct e1000_2tuple_filter_info filter_2tuple;
3580 struct e1000_5tuple_filter *p_5tuple_filter;
3581 struct e1000_2tuple_filter *p_2tuple_filter;
3584 switch (ntuple_filter->flags) {
3585 case RTE_5TUPLE_FLAGS:
3586 case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
3587 if (hw->mac.type != e1000_82576)
3589 memset(&filter_5tuple,
3591 sizeof(struct e1000_5tuple_filter_info));
3592 ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
3596 p_5tuple_filter = igb_5tuple_filter_lookup_82576(
3597 &filter_info->fivetuple_list,
3599 if (p_5tuple_filter == NULL) {
3600 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3603 ntuple_filter->queue = p_5tuple_filter->queue;
3605 case RTE_2TUPLE_FLAGS:
3606 case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
3607 if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
3609 memset(&filter_2tuple,
3611 sizeof(struct e1000_2tuple_filter_info));
3612 ret = ntuple_filter_to_2tuple(ntuple_filter, &filter_2tuple);
3615 p_2tuple_filter = igb_2tuple_filter_lookup(
3616 &filter_info->twotuple_list,
3618 if (p_2tuple_filter == NULL) {
3619 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3622 ntuple_filter->queue = p_2tuple_filter->queue;
3633 * igb_ntuple_filter_handle - Handle operations for ntuple filter.
3634 * @dev: pointer to rte_eth_dev structure
3635 * @filter_op: operation to be taken.
3636 * @arg: a pointer to specific structure corresponding to the filter_op
3639 igb_ntuple_filter_handle(struct rte_eth_dev *dev,
3640 enum rte_filter_op filter_op,
3643 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3646 MAC_TYPE_FILTER_SUP(hw->mac.type);
3648 if (filter_op == RTE_ETH_FILTER_NOP)
3652 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
3657 switch (filter_op) {
3658 case RTE_ETH_FILTER_ADD:
3659 ret = igb_add_del_ntuple_filter(dev,
3660 (struct rte_eth_ntuple_filter *)arg,
3663 case RTE_ETH_FILTER_DELETE:
3664 ret = igb_add_del_ntuple_filter(dev,
3665 (struct rte_eth_ntuple_filter *)arg,
3668 case RTE_ETH_FILTER_GET:
3669 ret = igb_get_ntuple_filter(dev,
3670 (struct rte_eth_ntuple_filter *)arg);
3673 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
3681 igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info,
3686 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
3687 if (filter_info->ethertype_filters[i] == ethertype &&
3688 (filter_info->ethertype_mask & (1 << i)))
3695 igb_ethertype_filter_insert(struct e1000_filter_info *filter_info,
3700 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
3701 if (!(filter_info->ethertype_mask & (1 << i))) {
3702 filter_info->ethertype_mask |= 1 << i;
3703 filter_info->ethertype_filters[i] = ethertype;
3711 igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
3714 if (idx >= E1000_MAX_ETQF_FILTERS)
3716 filter_info->ethertype_mask &= ~(1 << idx);
3717 filter_info->ethertype_filters[idx] = 0;
3723 igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
3724 struct rte_eth_ethertype_filter *filter,
3727 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3728 struct e1000_filter_info *filter_info =
3729 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3733 if (filter->ether_type == ETHER_TYPE_IPv4 ||
3734 filter->ether_type == ETHER_TYPE_IPv6) {
3735 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
3736 " ethertype filter.", filter->ether_type);
3740 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
3741 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
3744 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
3745 PMD_DRV_LOG(ERR, "drop option is unsupported.");
3749 ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
3750 if (ret >= 0 && add) {
3751 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
3752 filter->ether_type);
3755 if (ret < 0 && !add) {
3756 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
3757 filter->ether_type);
3762 ret = igb_ethertype_filter_insert(filter_info,
3763 filter->ether_type);
3765 PMD_DRV_LOG(ERR, "ethertype filters are full.");
3769 etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
3770 etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE);
3771 etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT;
3773 ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret);
3777 E1000_WRITE_REG(hw, E1000_ETQF(ret), etqf);
3778 E1000_WRITE_FLUSH(hw);
3784 igb_get_ethertype_filter(struct rte_eth_dev *dev,
3785 struct rte_eth_ethertype_filter *filter)
3787 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3788 struct e1000_filter_info *filter_info =
3789 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3793 ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
3795 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
3796 filter->ether_type);
3800 etqf = E1000_READ_REG(hw, E1000_ETQF(ret));
3801 if (etqf & E1000_ETQF_FILTER_ENABLE) {
3802 filter->ether_type = etqf & E1000_ETQF_ETHERTYPE;
3804 filter->queue = (etqf & E1000_ETQF_QUEUE) >>
3805 E1000_ETQF_QUEUE_SHIFT;
3813 * igb_ethertype_filter_handle - Handle operations for ethertype filter.
3814 * @dev: pointer to rte_eth_dev structure
3815 * @filter_op: operation to be taken.
3816 * @arg: a pointer to specific structure corresponding to the filter_op
3819 igb_ethertype_filter_handle(struct rte_eth_dev *dev,
3820 enum rte_filter_op filter_op,
3823 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3826 MAC_TYPE_FILTER_SUP(hw->mac.type);
3828 if (filter_op == RTE_ETH_FILTER_NOP)
3832 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
3837 switch (filter_op) {
3838 case RTE_ETH_FILTER_ADD:
3839 ret = igb_add_del_ethertype_filter(dev,
3840 (struct rte_eth_ethertype_filter *)arg,
3843 case RTE_ETH_FILTER_DELETE:
3844 ret = igb_add_del_ethertype_filter(dev,
3845 (struct rte_eth_ethertype_filter *)arg,
3848 case RTE_ETH_FILTER_GET:
3849 ret = igb_get_ethertype_filter(dev,
3850 (struct rte_eth_ethertype_filter *)arg);
3853 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
3861 eth_igb_filter_ctrl(struct rte_eth_dev *dev,
3862 enum rte_filter_type filter_type,
3863 enum rte_filter_op filter_op,
3868 switch (filter_type) {
3869 case RTE_ETH_FILTER_NTUPLE:
3870 ret = igb_ntuple_filter_handle(dev, filter_op, arg);
3872 case RTE_ETH_FILTER_ETHERTYPE:
3873 ret = igb_ethertype_filter_handle(dev, filter_op, arg);
3875 case RTE_ETH_FILTER_SYN:
3876 ret = eth_igb_syn_filter_handle(dev, filter_op, arg);
3878 case RTE_ETH_FILTER_FLEXIBLE:
3879 ret = eth_igb_flex_filter_handle(dev, filter_op, arg);
3882 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3891 eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
3892 struct ether_addr *mc_addr_set,
3893 uint32_t nb_mc_addr)
3895 struct e1000_hw *hw;
3897 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3898 e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
3903 igb_timesync_enable(struct rte_eth_dev *dev)
3905 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3908 /* Start incrementing the register used to timestamp PTP packets. */
3909 E1000_WRITE_REG(hw, E1000_TIMINCA, E1000_TIMINCA_INIT);
3911 /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
3912 E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588),
3914 E1000_ETQF_FILTER_ENABLE |
3917 /* Enable timestamping of received PTP packets. */
3918 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
3919 tsync_ctl |= E1000_TSYNCRXCTL_ENABLED;
3920 E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);
3922 /* Enable timestamping of transmitted PTP packets. */
3923 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
3924 tsync_ctl |= E1000_TSYNCTXCTL_ENABLED;
3925 E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);
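/*
 * Application-side sketch (hypothetical usage, assuming the generic
 * ethdev timesync API of this release):
 *
 *   struct timespec ts;
 *
 *   rte_eth_timesync_enable(port_id);
 *   ... receive a PTP frame ...
 *   rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0);
 */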
3931 igb_timesync_disable(struct rte_eth_dev *dev)
3933 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3936 /* Disable timestamping of transmitted PTP packets. */
3937 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
3938 tsync_ctl &= ~E1000_TSYNCTXCTL_ENABLED;
3939 E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);
3941 /* Disable timestamping of received PTP packets. */
3942 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
3943 tsync_ctl &= ~E1000_TSYNCRXCTL_ENABLED;
3944 E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);
3946 /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
3947 E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), 0);
3949 /* Stop incrementing the System Time registers. */
3950 E1000_WRITE_REG(hw, E1000_TIMINCA, 0);
3956 igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
3957 struct timespec *timestamp,
3958 uint32_t flags __rte_unused)
3960 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3961 uint32_t tsync_rxctl;
3965 tsync_rxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
3966 if ((tsync_rxctl & E1000_TSYNCRXCTL_VALID) == 0)
3969 rx_stmpl = E1000_READ_REG(hw, E1000_RXSTMPL);
3970 rx_stmph = E1000_READ_REG(hw, E1000_RXSTMPH);
3972 timestamp->tv_sec = (uint64_t)(((uint64_t)rx_stmph << 32) | rx_stmpl);
3973 timestamp->tv_nsec = 0;
3979 igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
3980 struct timespec *timestamp)
3982 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3983 uint32_t tsync_txctl;
3987 tsync_txctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
3988 if ((tsync_txctl & E1000_TSYNCTXCTL_VALID) == 0)
3991 tx_stmpl = E1000_READ_REG(hw, E1000_TXSTMPL);
3992 tx_stmph = E1000_READ_REG(hw, E1000_TXSTMPH);
3994 timestamp->tv_sec = (uint64_t)(((uint64_t)tx_stmph << 32) | tx_stmpl);
3995 timestamp->tv_nsec = 0;
4001 eth_igb_get_reg_length(struct rte_eth_dev *dev __rte_unused)
4005 const struct reg_info *reg_group;
4007 while ((reg_group = igb_regs[g_ind++]))
4008 count += igb_reg_group_count(reg_group);
4014 igbvf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
4018 const struct reg_info *reg_group;
4020 while ((reg_group = igbvf_regs[g_ind++]))
4021 count += igb_reg_group_count(reg_group);
4027 eth_igb_get_regs(struct rte_eth_dev *dev,
4028 struct rte_dev_reg_info *regs)
4030 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4031 uint32_t *data = regs->data;
4034 const struct reg_info *reg_group;
4036 /* Support only full register dump */
4037 if ((regs->length == 0) ||
4038 (regs->length == (uint32_t)eth_igb_get_reg_length(dev))) {
4039 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
4041 while ((reg_group = igb_regs[g_ind++]))
4042 count += igb_read_regs_group(dev, &data[count],
4051 igbvf_get_regs(struct rte_eth_dev *dev,
4052 struct rte_dev_reg_info *regs)
4054 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4055 uint32_t *data = regs->data;
4058 const struct reg_info *reg_group;
4060 /* Support only full register dump */
4061 if ((regs->length == 0) ||
4062 (regs->length == (uint32_t)igbvf_get_reg_length(dev))) {
4063 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
4065 while ((reg_group = igbvf_regs[g_ind++]))
4066 count += igb_read_regs_group(dev, &data[count],
4075 eth_igb_get_eeprom_length(struct rte_eth_dev *dev)
4077 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4079 /* Return unit is byte count */
4080 return hw->nvm.word_size * 2;
4084 eth_igb_get_eeprom(struct rte_eth_dev *dev,
4085 struct rte_dev_eeprom_info *in_eeprom)
4087 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4088 struct e1000_nvm_info *nvm = &hw->nvm;
4089 uint16_t *data = in_eeprom->data;
4092 first = in_eeprom->offset >> 1;
4093 length = in_eeprom->length >> 1;
4094 if ((first >= hw->nvm.word_size) ||
4095 ((first + length) >= hw->nvm.word_size))
4098 in_eeprom->magic = hw->vendor_id |
4099 ((uint32_t)hw->device_id << 16);
4101 if (nvm->ops.read == NULL)
4104 return nvm->ops.read(hw, first, length, data);
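/*
 * Worked example: the NVM is word (16-bit) addressed, so a request for
 * byte offset 8 and byte length 4 is converted to first == 4 and
 * length == 2 words before being handed to nvm->ops.read().
 */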
4108 eth_igb_set_eeprom(struct rte_eth_dev *dev,
4109 struct rte_dev_eeprom_info *in_eeprom)
4111 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4112 struct e1000_nvm_info *nvm = &hw->nvm;
4113 uint16_t *data = in_eeprom->data;
4116 first = in_eeprom->offset >> 1;
4117 length = in_eeprom->length >> 1;
4118 if ((first >= hw->nvm.word_size) ||
4119 ((first + length) >= hw->nvm.word_size))
4122 in_eeprom->magic = (uint32_t)hw->vendor_id |
4123 ((uint32_t)hw->device_id << 16);
4125 if (nvm->ops.write == NULL)
4127 return nvm->ops.write(hw, first, length, data);
4130 static struct rte_driver pmd_igb_drv = {
4132 .init = rte_igb_pmd_init,
4135 static struct rte_driver pmd_igbvf_drv = {
4137 .init = rte_igbvf_pmd_init,
4142 eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
4144 struct e1000_hw *hw =
4145 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4146 uint32_t mask = 1 << queue_id;
4148 E1000_WRITE_REG(hw, E1000_EIMC, mask);
4149 E1000_WRITE_FLUSH(hw);
4155 eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
4157 struct e1000_hw *hw =
4158 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4159 uint32_t mask = 1 << queue_id;
4162 regval = E1000_READ_REG(hw, E1000_EIMS);
4163 E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
4164 E1000_WRITE_FLUSH(hw);
4166 rte_intr_enable(&dev->pci_dev->intr_handle);
4172 eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
4173 uint8_t index, uint8_t offset)
4175 uint32_t val = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
4178 val &= ~((uint32_t)0xFF << offset);
4180 /* write vector and valid bit */
4181 val |= (msix_vector | E1000_IVAR_VALID) << offset;
4183 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, val);
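/*
 * Worked example: each 32-bit IVAR register packs four 8-bit entries.
 * For offset == 16, the third byte lane is cleared and then loaded with
 * (msix_vector | E1000_IVAR_VALID), leaving the other lanes untouched.
 */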
4187 eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
4188 uint8_t queue, uint8_t msix_vector)
4192 if (hw->mac.type == e1000_82575) {
4194 tmp = E1000_EICR_RX_QUEUE0 << queue;
4195 else if (direction == 1)
4196 tmp = E1000_EICR_TX_QUEUE0 << queue;
4197 E1000_WRITE_REG(hw, E1000_MSIXBM(msix_vector), tmp);
4198 } else if (hw->mac.type == e1000_82576) {
4199 if ((direction == 0) || (direction == 1))
4200 eth_igb_write_ivar(hw, msix_vector, queue & 0x7,
4201 ((queue & 0x8) << 1) +
4203 } else if ((hw->mac.type == e1000_82580) ||
4204 (hw->mac.type == e1000_i350) ||
4205 (hw->mac.type == e1000_i354) ||
4206 (hw->mac.type == e1000_i210) ||
4207 (hw->mac.type == e1000_i211)) {
4208 if ((direction == 0) || (direction == 1))
4209 eth_igb_write_ivar(hw, msix_vector,
4211 ((queue & 0x1) << 4) +
4217 /* Sets up the hardware to generate MSI-X interrupts properly
4219 * board private structure
4222 eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
4226 uint32_t tmpval, regval, intr_mask;
4227 struct e1000_hw *hw =
4228 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4231 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
4233 /* won't configure MSI-X registers if no mapping is done
4234 * between interrupt vector and event fd
4236 if (!rte_intr_dp_is_en(intr_handle))
4240 /* set interrupt vector for other causes */
4241 if (hw->mac.type == e1000_82575) {
4242 tmpval = E1000_READ_REG(hw, E1000_CTRL_EXT);
4243 /* enable MSI-X PBA support */
4244 tmpval |= E1000_CTRL_EXT_PBA_CLR;
4246 /* Auto-Mask interrupts upon ICR read */
4247 tmpval |= E1000_CTRL_EXT_EIAME;
4248 tmpval |= E1000_CTRL_EXT_IRCA;
4250 E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmpval);
4252 /* enable msix_other interrupt */
4253 E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), 0, E1000_EIMS_OTHER);
4254 regval = E1000_READ_REG(hw, E1000_EIAC);
4255 E1000_WRITE_REG(hw, E1000_EIAC, regval | E1000_EIMS_OTHER);
4256 regval = E1000_READ_REG(hw, E1000_EIAM);
4257 E1000_WRITE_REG(hw, E1000_EIMS, regval | E1000_EIMS_OTHER);
4258 } else if ((hw->mac.type == e1000_82576) ||
4259 (hw->mac.type == e1000_82580) ||
4260 (hw->mac.type == e1000_i350) ||
4261 (hw->mac.type == e1000_i354) ||
4262 (hw->mac.type == e1000_i210) ||
4263 (hw->mac.type == e1000_i211)) {
4264 /* turn on MSI-X capability first */
4265 E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE |
4266 E1000_GPIE_PBA | E1000_GPIE_EIAME |
4269 intr_mask = (1 << intr_handle->max_intr) - 1;
4270 regval = E1000_READ_REG(hw, E1000_EIAC);
4271 E1000_WRITE_REG(hw, E1000_EIAC, regval | intr_mask);
4273 /* enable msix_other interrupt */
4274 regval = E1000_READ_REG(hw, E1000_EIMS);
4275 E1000_WRITE_REG(hw, E1000_EIMS, regval | intr_mask);
4276 tmpval = (dev->data->nb_rx_queues | E1000_IVAR_VALID) << 8;
4277 E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmpval);
4280 /* use EIAM to auto-mask when an MSI-X interrupt
4281 * is asserted; this saves a register write for every interrupt
4283 intr_mask = (1 << intr_handle->nb_efd) - 1;
4284 regval = E1000_READ_REG(hw, E1000_EIAM);
4285 E1000_WRITE_REG(hw, E1000_EIAM, regval | intr_mask);
4287 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
4288 eth_igb_assign_msix_vector(hw, 0, queue_id, vec);
4289 intr_handle->intr_vec[queue_id] = vec;
4290 if (vec < intr_handle->nb_efd - 1)
4294 E1000_WRITE_FLUSH(hw);
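/*
 * Worked example: with intr_handle->nb_efd == 4 event fds, the EIAM
 * auto-mask value is (1 << 4) - 1 == 0xF, covering the four queue
 * vectors assigned in the loop above.
 */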
4298 PMD_REGISTER_DRIVER(pmd_igb_drv);
4299 PMD_REGISTER_DRIVER(pmd_igbvf_drv);