/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_atomic.h>
#include <rte_malloc.h>

#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"
/*
 * Default values for port configuration
 */
#define IGB_DEFAULT_RX_FREE_THRESH  32
#define IGB_DEFAULT_RX_PTHRESH      8
#define IGB_DEFAULT_RX_HTHRESH      8
#define IGB_DEFAULT_RX_WTHRESH      0

#define IGB_DEFAULT_TX_PTHRESH      32
#define IGB_DEFAULT_TX_HTHRESH      0
#define IGB_DEFAULT_TX_WTHRESH      0

#define IGB_HKEY_MAX_INDEX 10
/* Bit shift and mask */
#define IGB_4_BIT_WIDTH  (CHAR_BIT / 2)
#define IGB_4_BIT_MASK   RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
#define IGB_8_BIT_WIDTH  CHAR_BIT
#define IGB_8_BIT_MASK   UINT8_MAX
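
/*
 * For reference: RTE_LEN2MASK(n, t) builds an n-bit all-ones mask of type t,
 * so IGB_4_BIT_MASK evaluates to 0x0f while IGB_8_BIT_MASK is 0xff.
 */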

/* Additional timesync values. */
#define E1000_ETQF_FILTER_1588 3
#define E1000_TIMINCA_INCVALUE 16000000
#define E1000_TIMINCA_INIT     ((0x02 << E1000_TIMINCA_16NS_SHIFT) \
				| E1000_TIMINCA_INCVALUE)
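
/*
 * Illustrative decoding (assuming the usual TIMINCA register layout, with
 * the increment period in the bits above E1000_TIMINCA_16NS_SHIFT and the
 * increment value in the bits below it): E1000_TIMINCA_INIT programs an
 * increment period of 2 and an increment value of 16000000 (0x00F42400).
 */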

static int  eth_igb_configure(struct rte_eth_dev *dev);
static int  eth_igb_start(struct rte_eth_dev *dev);
static void eth_igb_stop(struct rte_eth_dev *dev);
static void eth_igb_close(struct rte_eth_dev *dev);
static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
static void eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
static int  eth_igb_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static void eth_igb_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *rte_stats);
static void eth_igb_stats_reset(struct rte_eth_dev *dev);
static void eth_igb_infos_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
static void eth_igbvf_infos_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);
static int  eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int  eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int  eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
static int  eth_igb_interrupt_action(struct rte_eth_dev *dev);
static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
				      void *param);
static int  igb_hardware_init(struct e1000_hw *hw);
static void igb_hw_control_acquire(struct e1000_hw *hw);
static void igb_hw_control_release(struct e1000_hw *hw);
static void igb_init_manageability(struct e1000_hw *hw);
static void igb_release_manageability(struct e1000_hw *hw);

static int  eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static void eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
static void eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int eth_igb_led_on(struct rte_eth_dev *dev);
static int eth_igb_led_off(struct rte_eth_dev *dev);

static void igb_intr_disable(struct e1000_hw *hw);
static int  igb_get_rx_buffer_size(struct e1000_hw *hw);
static void eth_igb_rar_set(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr,
		uint32_t index, uint32_t pool);
static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
static void eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
		struct ether_addr *addr);

static void igbvf_intr_disable(struct e1000_hw *hw);
static int igbvf_dev_configure(struct rte_eth_dev *dev);
static int igbvf_dev_start(struct rte_eth_dev *dev);
static void igbvf_dev_stop(struct rte_eth_dev *dev);
static void igbvf_dev_close(struct rte_eth_dev *dev);
static int eth_igbvf_link_update(struct e1000_hw *hw);
static void eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats);
static void eth_igbvf_stats_reset(struct rte_eth_dev *dev);
static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static void igbvf_default_mac_addr_set(struct rte_eth_dev *dev,
		struct ether_addr *addr);
static int igbvf_get_reg_length(struct rte_eth_dev *dev);
static int igbvf_get_regs(struct rte_eth_dev *dev,
		struct rte_dev_reg_info *regs);

static int eth_igb_rss_reta_update(struct rte_eth_dev *dev,
				   struct rte_eth_rss_reta_entry64 *reta_conf,
				   uint16_t reta_size);
static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
				  struct rte_eth_rss_reta_entry64 *reta_conf,
				  uint16_t reta_size);

static int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
			struct rte_eth_syn_filter *filter,
			bool add);
static int eth_igb_syn_filter_get(struct rte_eth_dev *dev,
			struct rte_eth_syn_filter *filter);
static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int igb_add_2tuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_2tuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
			struct rte_eth_flex_filter *filter,
			bool add);
static int eth_igb_get_flex_filter(struct rte_eth_dev *dev,
			struct rte_eth_flex_filter *filter);
static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *filter,
			bool add);
static int igb_get_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *filter);
static int igb_ntuple_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter,
			bool add);
static int igb_ethertype_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int igb_get_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter);
static int eth_igb_filter_ctrl(struct rte_eth_dev *dev,
			enum rte_filter_type filter_type,
			enum rte_filter_op filter_op,
			void *arg);
static int eth_igb_get_reg_length(struct rte_eth_dev *dev);
static int eth_igb_get_regs(struct rte_eth_dev *dev,
		struct rte_dev_reg_info *regs);
static int eth_igb_get_eeprom_length(struct rte_eth_dev *dev);
static int eth_igb_get_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *eeprom);
static int eth_igb_set_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *eeprom);
static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
				    struct ether_addr *mc_addr_set,
				    uint32_t nb_mc_addr);
static int igb_timesync_enable(struct rte_eth_dev *dev);
static int igb_timesync_disable(struct rte_eth_dev *dev);
static int igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
					  struct timespec *timestamp,
					  uint32_t flags);
static int igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					  struct timespec *timestamp);
static int eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev,
					uint16_t queue_id);
static int eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev,
					 uint16_t queue_id);
static void eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
				       uint8_t queue, uint8_t msix_vector);
static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
			       uint8_t index, uint8_t offset);
static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);

/*
 * Define VF Stats MACRO for Non "cleared on read" register
 */
#define UPDATE_VF_STAT(reg, last, cur)            \
{                                                 \
	u32 latest = E1000_READ_REG(hw, reg);     \
	cur += latest - last;                     \
	last = latest;                            \
}
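
/*
 * Usage note: the VF counter registers are not cleared on read, so each
 * invocation captures the raw register, accumulates the delta since the
 * previous snapshot into `cur`, and stores the new snapshot in `last`.
 * Because `latest` and `last` are 32-bit unsigned values, the subtraction
 * also stays correct across a single register wrap-around.
 */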

#define IGB_FC_PAUSE_TIME 0x0680
#define IGB_LINK_UPDATE_CHECK_TIMEOUT  90  /* 9s */
#define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */

#define IGBVF_PMD_NAME "rte_igbvf_pmd"     /* PMD name */

static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_igb_map[] = {

#define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

{0},
};

/*
 * The set of PCI devices this driver supports (for 82576&I350 VF)
 */
static const struct rte_pci_id pci_id_igbvf_map[] = {

#define RTE_PCI_DEV_ID_DECL_IGBVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

{0},
};

static const struct eth_dev_ops eth_igb_ops = {
	.dev_configure = eth_igb_configure,
	.dev_start = eth_igb_start,
	.dev_stop = eth_igb_stop,
	.dev_close = eth_igb_close,
	.promiscuous_enable = eth_igb_promiscuous_enable,
	.promiscuous_disable = eth_igb_promiscuous_disable,
	.allmulticast_enable = eth_igb_allmulticast_enable,
	.allmulticast_disable = eth_igb_allmulticast_disable,
	.link_update = eth_igb_link_update,
	.stats_get = eth_igb_stats_get,
	.stats_reset = eth_igb_stats_reset,
	.dev_infos_get = eth_igb_infos_get,
	.mtu_set = eth_igb_mtu_set,
	.vlan_filter_set = eth_igb_vlan_filter_set,
	.vlan_tpid_set = eth_igb_vlan_tpid_set,
	.vlan_offload_set = eth_igb_vlan_offload_set,
	.rx_queue_setup = eth_igb_rx_queue_setup,
	.rx_queue_intr_enable = eth_igb_rx_queue_intr_enable,
	.rx_queue_intr_disable = eth_igb_rx_queue_intr_disable,
	.rx_queue_release = eth_igb_rx_queue_release,
	.rx_queue_count = eth_igb_rx_queue_count,
	.rx_descriptor_done = eth_igb_rx_descriptor_done,
	.tx_queue_setup = eth_igb_tx_queue_setup,
	.tx_queue_release = eth_igb_tx_queue_release,
	.dev_led_on = eth_igb_led_on,
	.dev_led_off = eth_igb_led_off,
	.flow_ctrl_get = eth_igb_flow_ctrl_get,
	.flow_ctrl_set = eth_igb_flow_ctrl_set,
	.mac_addr_add = eth_igb_rar_set,
	.mac_addr_remove = eth_igb_rar_clear,
	.mac_addr_set = eth_igb_default_mac_addr_set,
	.reta_update = eth_igb_rss_reta_update,
	.reta_query = eth_igb_rss_reta_query,
	.rss_hash_update = eth_igb_rss_hash_update,
	.rss_hash_conf_get = eth_igb_rss_hash_conf_get,
	.filter_ctrl = eth_igb_filter_ctrl,
	.set_mc_addr_list = eth_igb_set_mc_addr_list,
	.timesync_enable = igb_timesync_enable,
	.timesync_disable = igb_timesync_disable,
	.timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = igb_timesync_read_tx_timestamp,
	.get_reg_length = eth_igb_get_reg_length,
	.get_reg = eth_igb_get_regs,
	.get_eeprom_length = eth_igb_get_eeprom_length,
	.get_eeprom = eth_igb_get_eeprom,
	.set_eeprom = eth_igb_set_eeprom,
};

/*
 * dev_ops for virtual function, bare necessities for basic vf
 * operation have been implemented
 */
static const struct eth_dev_ops igbvf_eth_dev_ops = {
	.dev_configure = igbvf_dev_configure,
	.dev_start = igbvf_dev_start,
	.dev_stop = igbvf_dev_stop,
	.dev_close = igbvf_dev_close,
	.link_update = eth_igb_link_update,
	.stats_get = eth_igbvf_stats_get,
	.stats_reset = eth_igbvf_stats_reset,
	.vlan_filter_set = igbvf_vlan_filter_set,
	.dev_infos_get = eth_igbvf_infos_get,
	.rx_queue_setup = eth_igb_rx_queue_setup,
	.rx_queue_release = eth_igb_rx_queue_release,
	.tx_queue_setup = eth_igb_tx_queue_setup,
	.tx_queue_release = eth_igb_tx_queue_release,
	.set_mc_addr_list = eth_igb_set_mc_addr_list,
	.mac_addr_set = igbvf_default_mac_addr_set,
	.get_reg_length = igbvf_get_reg_length,
	.get_reg = igbvf_get_regs,
};

/**
 * Atomically reads the link status information from global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to read from.
 *   - Pointer to the buffer to be saved with the link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_igb_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
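
/*
 * Note on the cmpset idiom above: rte_atomic64_cmpset() is given the current
 * value of *dst as its expected value, so the copy only fails (cmpset returns
 * 0 and the function returns -1) if another writer updates the link structure
 * between the read and the compare-and-set. This relies on struct
 * rte_eth_link fitting in 64 bits.
 */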

/**
 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to read from.
 *   - Pointer to the buffer to be saved with the link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static inline void
igb_intr_enable(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
	E1000_WRITE_FLUSH(hw);
}

static void
igb_intr_disable(struct e1000_hw *hw)
{
	E1000_WRITE_REG(hw, E1000_IMC, ~0);
	E1000_WRITE_FLUSH(hw);
}

static inline int32_t
igb_pf_reset_hw(struct e1000_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = e1000_reset_hw(hw);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	return status;
}

static void
igb_identify_hardware(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw->vendor_id = dev->pci_dev->id.vendor_id;
	hw->device_id = dev->pci_dev->id.device_id;
	hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;

	e1000_set_mac_type(hw);

	/* need to check if it is a vf device below */
}

static int
igb_reset_swfw_lock(struct e1000_hw *hw)
{
	int ret_val;

	/*
	 * Do mac ops initialization manually here, since we will need
	 * some function pointers set by this call.
	 */
	ret_val = e1000_init_mac_params(hw);
	if (ret_val)
		return ret_val;

	/*
	 * SMBI lock should not fail in this early stage. If this is the case,
	 * it is due to an improper exit of the application.
	 * So force the release of the faulty lock.
	 */
	if (e1000_get_hw_semaphore_generic(hw) < 0) {
		PMD_DRV_LOG(DEBUG, "SMBI lock released");
	}
	e1000_put_hw_semaphore_generic(hw);

	if (hw->mac.ops.acquire_swfw_sync != NULL) {
		uint16_t mask;

		/*
		 * Phy lock should not fail in this early stage. If this is
		 * the case, it is due to an improper exit of the application.
		 * So force the release of the faulty lock.
		 */
		mask = E1000_SWFW_PHY0_SM << hw->bus.func;
		if (hw->bus.func > E1000_FUNC_1)
			mask <<= 2;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
				    hw->bus.func);
		}
		hw->mac.ops.release_swfw_sync(hw, mask);

		/*
		 * This one is more tricky since it is common to all ports; but
		 * swfw_sync retries last long enough (1s) to be almost sure
		 * that if lock can not be taken it is due to an improper lock
		 * of the semaphore.
		 */
		mask = E1000_SWFW_EEP_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW common locks released");
		}
		hw->mac.ops.release_swfw_sync(hw, mask);
	}

	return E1000_SUCCESS;
}

static int
eth_igb_dev_init(struct rte_eth_dev *eth_dev)
{
	int error = 0;
	struct rte_pci_device *pci_dev;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);

	uint32_t ctrl_ext;

	pci_dev = eth_dev->pci_dev;
	eth_dev->dev_ops = &eth_igb_ops;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
		return 0;
	}

	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	igb_identify_hardware(eth_dev);
	if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	e1000_get_bus_info(hw);

	/* Reset any pending lock */
	if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	/* Finish initialization */
	if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	hw->mac.autoneg = 1;
	hw->phy.autoneg_wait_to_complete = 0;
	hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = 0; /* AUTO_ALL_MODES */
		hw->phy.disable_polarity_correction = 0;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	/*
	 * Start from a known state, this is important in reading the nvm
	 * and mac from that.
	 */
	igb_pf_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time its a real issue.
		 */
		if (e1000_validate_nvm_checksum(hw) < 0) {
			PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
			error = -EIO;
			goto err_late;
		}
	}

	/* Read the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(hw) != 0) {
		PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
		error = -EIO;
		goto err_late;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("e1000",
		ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
						"store MAC addresses",
				ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		error = -ENOMEM;
		goto err_late;
	}

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)hw->mac.addr, &eth_dev->data->mac_addrs[0]);

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* Now initialize the hardware */
	if (igb_hardware_init(hw) != 0) {
		PMD_INIT_LOG(ERR, "Hardware initialization failed");
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		error = -ENODEV;
		goto err_late;
	}
	hw->mac.get_link_status = 1;
	adapter->stopped = 0;

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(hw) < 0) {
		PMD_INIT_LOG(ERR, "PHY reset is blocked due to"
					"SOL/IDER session");
	}

	/* initialize PF if max_vfs not zero */
	igb_pf_host_init(eth_dev);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	/* enable support intr */
	igb_intr_enable(eth_dev);

	TAILQ_INIT(&filter_info->flex_list);
	filter_info->flex_mask = 0;
	TAILQ_INIT(&filter_info->twotuple_list);
	filter_info->twotuple_mask = 0;
	TAILQ_INIT(&filter_info->fivetuple_list);
	filter_info->fivetuple_mask = 0;

	return 0;

err_late:
	igb_hw_control_release(hw);

	return (error);
}

static int
eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct e1000_hw *hw;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	hw = E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	pci_dev = eth_dev->pci_dev;

	if (adapter->stopped == 0)
		eth_igb_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* Reset any pending lock */
	igb_reset_swfw_lock(hw);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	/* uninitialize PF if max_vfs not zero */
	igb_pf_host_uninit(eth_dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(&(pci_dev->intr_handle));
	rte_intr_callback_unregister(&(pci_dev->intr_handle),
		eth_igb_interrupt_handler, (void *)eth_dev);

	return 0;
}

/*
 * Virtual Function device init
 */
static int
eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int diag;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &igbvf_eth_dev_ops;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
		return 0;
	}

	pci_dev = eth_dev->pci_dev;

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	adapter->stopped = 0;

	/* Initialize the shared code (base driver) */
	diag = e1000_setup_init_funcs(hw, TRUE);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d",
			diag);
		return -EIO;
	}

	/* init_mailbox_params */
	hw->mbx.ops.init_params(hw);

	/* Disable the interrupts for VF */
	igbvf_intr_disable(hw);

	diag = hw->mac.ops.reset_hw(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN *
		hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %d bytes needed to store MAC "
			"addresses",
			ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x "
		     "mac.type=%s",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id, "igb_mac_82576_vf");

	return 0;
}

static int
eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	if (adapter->stopped == 0)
		igbvf_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	return 0;
}

static struct eth_driver rte_igb_pmd = {
	.pci_drv = {
		.name = "rte_igb_pmd",
		.id_table = pci_id_igb_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
			RTE_PCI_DRV_DETACHABLE,
	},
	.eth_dev_init = eth_igb_dev_init,
	.eth_dev_uninit = eth_igb_dev_uninit,
	.dev_private_size = sizeof(struct e1000_adapter),
};

/*
 * virtual function driver struct
 */
static struct eth_driver rte_igbvf_pmd = {
	.pci_drv = {
		.name = "rte_igbvf_pmd",
		.id_table = pci_id_igbvf_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
	},
	.eth_dev_init = eth_igbvf_dev_init,
	.eth_dev_uninit = eth_igbvf_dev_uninit,
	.dev_private_size = sizeof(struct e1000_adapter),
};

static int
rte_igb_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
{
	rte_eth_driver_register(&rte_igb_pmd);
	return 0;
}

static void
igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* RCTL: enable VLAN filter since VMDq always use VLAN filter */
	uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

/*
 * VF Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Virtual Poll Mode] Driver of PCI IGB devices.
 */
static int
rte_igbvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	rte_eth_driver_register(&rte_igbvf_pmd);
	return 0;
}
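
/*
 * Illustrative note: in this DPDK generation the two init hooks above are
 * normally wired into EAL through rte_driver records registered elsewhere
 * in this file, along the lines of:
 *
 *	static struct rte_driver pmd_igb_drv = {
 *		.type = PMD_PDEV,
 *		.init = rte_igb_pmd_init,
 *	};
 *	PMD_REGISTER_DRIVER(pmd_igb_drv);
 */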

static int
eth_igb_configure(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();
	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
	PMD_INIT_FUNC_TRACE();

	return (0);
}

static int
eth_igb_start(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);
	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
	int ret, mask;
	uint32_t intr_vector = 0;
	uint32_t ctrl_ext;

	PMD_INIT_FUNC_TRACE();

	/* Power up the phy. Needed to make the link go Up */
	e1000_power_up_phy(hw);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 */
	if (hw->mac.type == e1000_82575) {
		uint32_t pba;

		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		E1000_WRITE_REG(hw, E1000_PBA, pba);
	}

	/* Put the address into the Receive Address Array */
	e1000_rar_set(hw, hw->mac.addr, 0);

	/* Initialize the hardware */
	if (igb_hardware_init(hw)) {
		PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
		return (-EIO);
	}
	adapter->stopped = 0;

	E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	/* configure PF module if SRIOV enabled */
	igb_pf_host_configure(dev);

	/* check and configure queue intr-vector mapping */
	if (dev->data->dev_conf.intr_conf.rxq != 0)
		intr_vector = dev->data->nb_rx_queues;

	if (rte_intr_efd_enable(intr_handle, intr_vector))
		return -1;

	if (rte_intr_dp_is_en(intr_handle)) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec\n", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure msix for rx interrupt */
	eth_igb_configure_msix_intr(dev);

	/* Configure for OS presence */
	igb_init_manageability(hw);

	eth_igb_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	ret = eth_igb_rx_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		igb_dev_clear_queues(dev);
		return ret;
	}

	e1000_clear_hw_cntrs_base_generic(hw);

	/*
	 * VLAN Offload Settings
	 */
	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
			ETH_VLAN_EXTEND_MASK;
	eth_igb_vlan_offload_set(dev, mask);

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
		/* Enable VLAN filter since VMDq always use VLAN filter */
		igb_vmdq_vlan_hw_filter_enable(dev);
	}

	if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
		(hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210) ||
		(hw->mac.type == e1000_i211)) {
		/* Configure EITR with the maximum possible value (0xFFFF) */
		E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
	}

	/* Setup link speed and duplex */
	switch (dev->data->dev_conf.link_speed) {
	case ETH_LINK_SPEED_AUTONEG:
		if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
			hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
		else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
			hw->phy.autoneg_advertised = E1000_ALL_HALF_DUPLEX;
		else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
			hw->phy.autoneg_advertised = E1000_ALL_FULL_DUPLEX;
		else
			goto error_invalid_config;
		break;
	case ETH_LINK_SPEED_10:
		if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
			hw->phy.autoneg_advertised = E1000_ALL_10_SPEED;
		else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
			hw->phy.autoneg_advertised = ADVERTISE_10_HALF;
		else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
			hw->phy.autoneg_advertised = ADVERTISE_10_FULL;
		else
			goto error_invalid_config;
		break;
	case ETH_LINK_SPEED_100:
		if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
			hw->phy.autoneg_advertised = E1000_ALL_100_SPEED;
		else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
			hw->phy.autoneg_advertised = ADVERTISE_100_HALF;
		else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
			hw->phy.autoneg_advertised = ADVERTISE_100_FULL;
		else
			goto error_invalid_config;
		break;
	case ETH_LINK_SPEED_1000:
		if ((dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX) ||
				(dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX))
			hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
		else
			goto error_invalid_config;
		break;
	case ETH_LINK_SPEED_10000:
	default:
		goto error_invalid_config;
	}
	e1000_setup_link(hw);

	/* check if lsc interrupt feature is enabled */
	if (dev->data->dev_conf.intr_conf.lsc != 0) {
		if (rte_intr_allow_others(intr_handle)) {
			rte_intr_callback_register(intr_handle,
						   eth_igb_interrupt_handler,
						   (void *)dev);
			eth_igb_lsc_interrupt_setup(dev);
		} else
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex\n");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0)
		eth_igb_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	igb_intr_enable(dev);

	PMD_INIT_LOG(DEBUG, "<<");

	return (0);

error_invalid_config:
	PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u",
		     dev->data->dev_conf.link_speed,
		     dev->data->dev_conf.link_duplex, dev->data->port_id);
	igb_dev_clear_queues(dev);
	return (-EINVAL);
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 *
 **********************************************************************/
static void
eth_igb_stop(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct rte_eth_link link;
	struct e1000_flex_filter *p_flex;
	struct e1000_5tuple_filter *p_5tuple, *p_5tuple_next;
	struct e1000_2tuple_filter *p_2tuple, *p_2tuple_next;
	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;

	igb_intr_disable(hw);

	/* disable intr eventfd mapping */
	rte_intr_disable(intr_handle);

	igb_pf_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	/* Set bit for Go Link disconnect */
	if (hw->mac.type >= e1000_82580) {
		uint32_t phpm_reg;

		phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
		phpm_reg |= E1000_82580_PM_GO_LINKD;
		E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
	}

	/* Power down the phy. Needed to make the link go Down */
	if (hw->phy.media_type == e1000_media_type_copper)
		e1000_power_down_phy(hw);
	else
		e1000_shutdown_fiber_serdes_link(hw);

	igb_dev_clear_queues(dev);

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_igb_dev_atomic_write_link_status(dev, &link);

	/* Remove all flex filters of the device */
	while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) {
		TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries);
		rte_free(p_flex);
	}
	filter_info->flex_mask = 0;

	/* Remove all ntuple filters of the device */
	for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
	     p_5tuple != NULL; p_5tuple = p_5tuple_next) {
		p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
		TAILQ_REMOVE(&filter_info->fivetuple_list,
			     p_5tuple, entries);
		rte_free(p_5tuple);
	}
	filter_info->fivetuple_mask = 0;
	for (p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list);
	     p_2tuple != NULL; p_2tuple = p_2tuple_next) {
		p_2tuple_next = TAILQ_NEXT(p_2tuple, entries);
		TAILQ_REMOVE(&filter_info->twotuple_list,
			     p_2tuple, entries);
		rte_free(p_2tuple);
	}
	filter_info->twotuple_mask = 0;

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}

static void
eth_igb_close(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);
	struct rte_eth_link link;
	struct rte_pci_device *pci_dev;

	eth_igb_stop(dev);
	adapter->stopped = 1;

	e1000_phy_hw_reset(hw);
	igb_release_manageability(hw);
	igb_hw_control_release(hw);

	/* Clear bit for Go Link disconnect */
	if (hw->mac.type >= e1000_82580) {
		uint32_t phpm_reg;

		phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
		phpm_reg &= ~E1000_82580_PM_GO_LINKD;
		E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
	}

	igb_dev_free_queues(dev);

	pci_dev = dev->pci_dev;
	if (pci_dev->intr_handle.intr_vec) {
		rte_free(pci_dev->intr_handle.intr_vec);
		pci_dev->intr_handle.intr_vec = NULL;
	}

	memset(&link, 0, sizeof(link));
	rte_igb_dev_atomic_write_link_status(dev, &link);
}

static int
igb_get_rx_buffer_size(struct e1000_hw *hw)
{
	uint32_t rx_buf_size;
	if (hw->mac.type == e1000_82576) {
		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10;
	} else if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i350) {
		/* PBS needs to be translated according to a lookup table */
		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf);
		rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size);
		rx_buf_size = (rx_buf_size << 10);
	} else if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10;
	} else {
		rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10;
	}

	return rx_buf_size;
}

/*********************************************************************
 *
 *  Initialize the hardware
 *
 **********************************************************************/
static int
igb_hardware_init(struct e1000_hw *hw)
{
	uint32_t rx_buf_size;
	int diag;

	/* Let the firmware know the OS is in control */
	igb_hw_control_acquire(hw);

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two standard size (1518)
	 *   frames to be received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	rx_buf_size = igb_get_rx_buffer_size(hw);

	hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2);
	hw->fc.low_water = hw->fc.high_water - 1500;
	hw->fc.pause_time = IGB_FC_PAUSE_TIME;
	hw->fc.send_xon = 1;
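
	/*
	 * Worked example (assuming an 82576 reporting a 64 KB Rx packet
	 * buffer): rx_buf_size = 65536, so high_water = 65536 - 2 * 1518 =
	 * 62500 bytes and low_water = 62500 - 1500 = 61000 bytes.
	 */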

	/* Set Flow control, use the tunable location if sane */
	if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4))
		hw->fc.requested_mode = igb_fc_setting;
	else
		hw->fc.requested_mode = e1000_fc_none;

	/* Issue a global reset */
	igb_pf_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	diag = e1000_init_hw(hw);
	if (diag < 0)
		return (diag);

	E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
	e1000_get_phy_info(hw);
	e1000_check_for_link(hw);

	return (0);
}

/* This function is based on igb_update_stats_counters() in igb/if_igb.c */
static void
eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_hw_stats *stats =
			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	int pause_frames;

	if (hw->phy.media_type == e1000_media_type_copper ||
	    (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		stats->symerrs +=
		    E1000_READ_REG(hw, E1000_SYMERRS);
		stats->sec += E1000_READ_REG(hw, E1000_SEC);
	}

	stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
	stats->mpc += E1000_READ_REG(hw, E1000_MPC);
	stats->scc += E1000_READ_REG(hw, E1000_SCC);
	stats->ecol += E1000_READ_REG(hw, E1000_ECOL);

	stats->mcc += E1000_READ_REG(hw, E1000_MCC);
	stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
	stats->colc += E1000_READ_REG(hw, E1000_COLC);
	stats->dc += E1000_READ_REG(hw, E1000_DC);
	stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
	stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
	stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
	/*
	** For watchdog management we need to know if we have been
	** paused during the last interval, so capture that here.
	*/
	pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
	stats->xoffrxc += pause_frames;
	stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
	stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
	stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
	stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
	stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
	stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
	stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
	stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
	stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
	stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
	stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
	stats->gptc += E1000_READ_REG(hw, E1000_GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
	stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
	stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
	stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);

	stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
	stats->ruc += E1000_READ_REG(hw, E1000_RUC);
	stats->rfc += E1000_READ_REG(hw, E1000_RFC);
	stats->roc += E1000_READ_REG(hw, E1000_ROC);
	stats->rjc += E1000_READ_REG(hw, E1000_RJC);

	stats->tor += E1000_READ_REG(hw, E1000_TORH);
	stats->tot += E1000_READ_REG(hw, E1000_TOTH);

	stats->tpr += E1000_READ_REG(hw, E1000_TPR);
	stats->tpt += E1000_READ_REG(hw, E1000_TPT);
	stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
	stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
	stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
	stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
	stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
	stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
	stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
	stats->bptc += E1000_READ_REG(hw, E1000_BPTC);

	/* Interrupt Counts */

	stats->iac += E1000_READ_REG(hw, E1000_IAC);
	stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
	stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
	stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
	stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
	stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
	stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
	stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
	stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);

	/* Host to Card Statistics */

	stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
	stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
	stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
	stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
	stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
	stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
	stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
	stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL);
	stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32);
	stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL);
	stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32);
	stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
	stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
	stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);

	stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
	stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
	stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
	stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
	stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
	stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);

	if (rte_stats == NULL)
		return;

	/* Rx Errors */
	rte_stats->ibadcrc = stats->crcerrs;
	rte_stats->ibadlen = stats->rlec + stats->ruc + stats->roc;
	rte_stats->imissed = stats->mpc;
	rte_stats->ierrors = rte_stats->ibadcrc +
			     rte_stats->ibadlen +
			     rte_stats->imissed +
			     stats->rxerrc + stats->algnerrc + stats->cexterr;

	/* Tx Errors */
	rte_stats->oerrors = stats->ecol + stats->latecol;

	/* XON/XOFF pause frames */
	rte_stats->tx_pause_xon = stats->xontxc;
	rte_stats->rx_pause_xon = stats->xonrxc;
	rte_stats->tx_pause_xoff = stats->xofftxc;
	rte_stats->rx_pause_xoff = stats->xoffrxc;

	rte_stats->ipackets = stats->gprc;
	rte_stats->opackets = stats->gptc;
	rte_stats->ibytes = stats->gorc;
	rte_stats->obytes = stats->gotc;
}

static void
eth_igb_stats_reset(struct rte_eth_dev *dev)
{
	struct e1000_hw_stats *hw_stats =
			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* HW registers are cleared on read */
	eth_igb_stats_get(dev, NULL);

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));
}

static void
eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* Good Rx packets, include VF loopback */
	UPDATE_VF_STAT(E1000_VFGPRC,
	    hw_stats->last_gprc, hw_stats->gprc);

	/* Good Rx octets, include VF loopback */
	UPDATE_VF_STAT(E1000_VFGORC,
	    hw_stats->last_gorc, hw_stats->gorc);

	/* Good Tx packets, include VF loopback */
	UPDATE_VF_STAT(E1000_VFGPTC,
	    hw_stats->last_gptc, hw_stats->gptc);

	/* Good Tx octets, include VF loopback */
	UPDATE_VF_STAT(E1000_VFGOTC,
	    hw_stats->last_gotc, hw_stats->gotc);

	/* Rx Multicast packets */
	UPDATE_VF_STAT(E1000_VFMPRC,
	    hw_stats->last_mprc, hw_stats->mprc);

	/* Good Rx loopback packets */
	UPDATE_VF_STAT(E1000_VFGPRLBC,
	    hw_stats->last_gprlbc, hw_stats->gprlbc);

	/* Good Rx loopback octets */
	UPDATE_VF_STAT(E1000_VFGORLBC,
	    hw_stats->last_gorlbc, hw_stats->gorlbc);

	/* Good Tx loopback packets */
	UPDATE_VF_STAT(E1000_VFGPTLBC,
	    hw_stats->last_gptlbc, hw_stats->gptlbc);

	/* Good Tx loopback octets */
	UPDATE_VF_STAT(E1000_VFGOTLBC,
	    hw_stats->last_gotlbc, hw_stats->gotlbc);

	if (rte_stats == NULL)
		return;

	rte_stats->ipackets = hw_stats->gprc;
	rte_stats->ibytes = hw_stats->gorc;
	rte_stats->opackets = hw_stats->gptc;
	rte_stats->obytes = hw_stats->gotc;
	rte_stats->imcasts = hw_stats->mprc;
	rte_stats->ilbpackets = hw_stats->gprlbc;
	rte_stats->ilbbytes = hw_stats->gorlbc;
	rte_stats->olbpackets = hw_stats->gptlbc;
	rte_stats->olbbytes = hw_stats->gotlbc;
}

static void
eth_igbvf_stats_reset(struct rte_eth_dev *dev)
{
	struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* Sync HW register to the last stats */
	eth_igbvf_stats_get(dev, NULL);

	/* reset HW current stats */
	memset(&hw_stats->gprc, 0, sizeof(*hw_stats) -
	       offsetof(struct e1000_vf_stats, gprc));
}

static void
eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
	dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM;

	switch (hw->mac.type) {
	case e1000_82575:
		dev_info->max_rx_queues = 4;
		dev_info->max_tx_queues = 4;
		dev_info->max_vmdq_pools = 0;
		break;

	case e1000_82576:
		dev_info->max_rx_queues = 16;
		dev_info->max_tx_queues = 16;
		dev_info->max_vmdq_pools = ETH_8_POOLS;
		dev_info->vmdq_queue_num = 16;
		break;

	case e1000_82580:
		dev_info->max_rx_queues = 8;
		dev_info->max_tx_queues = 8;
		dev_info->max_vmdq_pools = ETH_8_POOLS;
		dev_info->vmdq_queue_num = 8;
		break;

	case e1000_i350:
		dev_info->max_rx_queues = 8;
		dev_info->max_tx_queues = 8;
		dev_info->max_vmdq_pools = ETH_8_POOLS;
		dev_info->vmdq_queue_num = 8;
		break;

	case e1000_i354:
		dev_info->max_rx_queues = 8;
		dev_info->max_tx_queues = 8;
		break;

	case e1000_i210:
		dev_info->max_rx_queues = 4;
		dev_info->max_tx_queues = 4;
		dev_info->max_vmdq_pools = 0;
		break;

	case e1000_i211:
		dev_info->max_rx_queues = 2;
		dev_info->max_tx_queues = 2;
		dev_info->max_vmdq_pools = 0;
		break;

	default:
		/* Should not happen */
		break;
	}
	dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t);
	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
	dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = IGB_DEFAULT_RX_PTHRESH,
			.hthresh = IGB_DEFAULT_RX_HTHRESH,
			.wthresh = IGB_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = IGB_DEFAULT_TX_PTHRESH,
			.hthresh = IGB_DEFAULT_TX_HTHRESH,
			.wthresh = IGB_DEFAULT_TX_WTHRESH,
		},
		.txq_flags = 0,
	};
}

static void
eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
	dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
				DEV_RX_OFFLOAD_IPV4_CKSUM |
				DEV_RX_OFFLOAD_UDP_CKSUM |
				DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
				DEV_TX_OFFLOAD_IPV4_CKSUM |
				DEV_TX_OFFLOAD_UDP_CKSUM |
				DEV_TX_OFFLOAD_TCP_CKSUM |
				DEV_TX_OFFLOAD_SCTP_CKSUM;
	switch (hw->mac.type) {
	case e1000_vfadapt:
		dev_info->max_rx_queues = 2;
		dev_info->max_tx_queues = 2;
		break;
	case e1000_vfadapt_i350:
		dev_info->max_rx_queues = 1;
		dev_info->max_tx_queues = 1;
		break;
	default:
		/* Should not happen */
		break;
	}

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = IGB_DEFAULT_RX_PTHRESH,
			.hthresh = IGB_DEFAULT_RX_HTHRESH,
			.wthresh = IGB_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = IGB_DEFAULT_TX_PTHRESH,
			.hthresh = IGB_DEFAULT_TX_HTHRESH,
			.wthresh = IGB_DEFAULT_TX_WTHRESH,
		},
		.txq_flags = 0,
	};
}

/* return 0 means link status changed, -1 means not changed */
static int
eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link, old;
	int link_check, count;

	link_check = 0;
	hw->mac.get_link_status = 1;

	/* possible wait-to-complete in up to 9 seconds */
	for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count++) {
		/* Read the real link status */
		switch (hw->phy.media_type) {
		case e1000_media_type_copper:
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			break;

		case e1000_media_type_fiber:
			e1000_check_for_link(hw);
			link_check = (E1000_READ_REG(hw, E1000_STATUS) &
				      E1000_STATUS_LU);
			break;

		case e1000_media_type_internal_serdes:
			e1000_check_for_link(hw);
			link_check = hw->mac.serdes_has_link;
			break;

		/* VF device is type_unknown */
		case e1000_media_type_unknown:
			eth_igbvf_link_update(hw);
			link_check = !hw->mac.get_link_status;
			break;

		default:
			break;
		}
		if (link_check || wait_to_complete == 0)
			break;
		rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL);
	}
	memset(&link, 0, sizeof(link));
	rte_igb_dev_atomic_read_link_status(dev, &link);
	old = link;

	/* Now we check if a transition has happened */
	if (link_check) {
		hw->mac.ops.get_link_up_info(hw, &link.link_speed,
					     &link.link_duplex);
		link.link_status = 1;
	} else if (!link_check) {
		link.link_speed = 0;
		link.link_duplex = 0;
		link.link_status = 0;
	}
	rte_igb_dev_atomic_write_link_status(dev, &link);

	/* not changed */
	if (old.link_status == link.link_status)
		return -1;

	/* changed */
	return 0;
}

/*
 * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means
 * that the driver is loaded.
 */
static void
igb_hw_control_acquire(struct e1000_hw *hw)
{
	uint32_t ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/*
 * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void
igb_hw_control_release(struct e1000_hw *hw)
{
	uint32_t ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	E1000_WRITE_REG(hw, E1000_CTRL_EXT,
			ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/*
 * Bit of a misnomer, what this really means is
 * to enable OS management of the system... aka
 * to disable special hardware management features.
 */
static void
igb_init_manageability(struct e1000_hw *hw)
{
	if (e1000_enable_mng_pass_thru(hw)) {
		uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
		uint32_t manc = E1000_READ_REG(hw, E1000_MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		/* enable receiving management packets to the host */
		manc |= E1000_MANC_EN_MNG2HOST;
		manc2h |= 1 << 5;  /* Mng Port 623 */
		manc2h |= 1 << 6;  /* Mng Port 664 */
		E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
		E1000_WRITE_REG(hw, E1000_MANC, manc);
	}
}

static void
igb_release_manageability(struct e1000_hw *hw)
{
	if (e1000_enable_mng_pass_thru(hw)) {
		uint32_t manc = E1000_READ_REG(hw, E1000_MANC);

		manc |= E1000_MANC_ARP_EN;
		manc &= ~E1000_MANC_EN_MNG2HOST;

		E1000_WRITE_REG(hw, E1000_MANC, manc);
	}
}

static void
eth_igb_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

static void
eth_igb_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl &= (~E1000_RCTL_UPE);
	if (dev->data->all_multicast == 1)
		rctl |= E1000_RCTL_MPE;
	else
		rctl &= (~E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

static void
eth_igb_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl |= E1000_RCTL_MPE;
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

static void
eth_igb_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	if (dev->data->promiscuous == 1)
		return; /* must remain in all_multicast mode */
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl &= (~E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

static int
eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
			      E1000_VFTA_ENTRY_MASK);
	vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
	vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}
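
/*
 * Worked example for eth_igb_vlan_filter_set() above (assuming the usual
 * 128 x 32-bit VFTA layout, i.e. E1000_VFTA_ENTRY_SHIFT == 5 and
 * E1000_VFTA_ENTRY_MASK == 0x7F): vlan_id 100 gives
 * vid_idx = (100 >> 5) & 0x7F = 3 and vid_bit = 1 << (100 & 0x1F) = 1 << 4,
 * so bit 4 of VFTA[3] is set or cleared.
 */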

static void
eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg = ETHER_TYPE_VLAN;

	reg |= (tpid << 16);
	E1000_WRITE_REG(hw, E1000_VET, reg);
}

static void
igb_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* Filter Table Disable */
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_CFIEN;
	reg &= ~E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);
}

static void
igb_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t reg;
	int i;

	/* Filter Table Enable, CFI not used for packet acceptance */
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_CFIEN;
	reg |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);

	/* restore VFTA table */
	for (i = 0; i < IGB_VFTA_SIZE; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
}

static void
igb_vlan_hw_strip_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* VLAN Mode Disable */
	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg &= ~E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);
}

static void
igb_vlan_hw_strip_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* VLAN Mode Enable */
	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg |= E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);
}
1935 igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
1937 struct e1000_hw *hw =
1938 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1941 /* CTRL_EXT: Extended VLAN */
1942 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1943 reg &= ~E1000_CTRL_EXT_EXTEND_VLAN;
1944 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
1946 /* Update maximum packet length */
1947 if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
1948 E1000_WRITE_REG(hw, E1000_RLPML,
1949			dev->data->dev_conf.rxmode.max_rx_pkt_len +
1950			VLAN_TAG_SIZE);
1954 igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
1956 struct e1000_hw *hw =
1957 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1960 /* CTRL_EXT: Extended VLAN */
1961 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1962 reg |= E1000_CTRL_EXT_EXTEND_VLAN;
1963 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
1965 /* Update maximum packet length */
1966 if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
1967 E1000_WRITE_REG(hw, E1000_RLPML,
1968			dev->data->dev_conf.rxmode.max_rx_pkt_len +
1969			2 * VLAN_TAG_SIZE);
1973 eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1975	if (mask & ETH_VLAN_STRIP_MASK) {
1976		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1977			igb_vlan_hw_strip_enable(dev);
1978		else
1979			igb_vlan_hw_strip_disable(dev);
1980	}
1982	if (mask & ETH_VLAN_FILTER_MASK) {
1983		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1984			igb_vlan_hw_filter_enable(dev);
1985		else
1986			igb_vlan_hw_filter_disable(dev);
1987	}
1989	if (mask & ETH_VLAN_EXTEND_MASK) {
1990		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1991			igb_vlan_hw_extend_enable(dev);
1992		else
1993			igb_vlan_hw_extend_disable(dev);
1994	}
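/*
 * Usage sketch (illustrative, not a fixed API contract): the callback
 * is reached with a bitmask naming which offload types to (re)apply,
 * and each selected type is set or cleared from the current rxmode
 * flags, e.g.:
 *
 *   dev->data->dev_conf.rxmode.hw_vlan_strip = 1;
 *   dev->data->dev_conf.rxmode.hw_vlan_filter = 0;
 *   eth_igb_vlan_offload_set(dev,
 *       ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
 *
 * Offload types whose mask bit is clear keep their current state.
 */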
1999 * It enables the interrupt mask and then enables the interrupt.
2002 * Pointer to struct rte_eth_dev.
2005 * - On success, zero.
2006 * - On failure, a negative value.
2009 eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
2011 struct e1000_interrupt *intr =
2012 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2014 intr->mask |= E1000_ICR_LSC;
2020 /* It clears the interrupt causes and enables the interrupt.
2021 * It is called only once during NIC initialization.
2024 * Pointer to struct rte_eth_dev.
2027 * - On success, zero.
2028 * - On failure, a negative value.
2030 static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev)
2032 uint32_t mask, regval;
2033 struct e1000_hw *hw =
2034 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2035 struct rte_eth_dev_info dev_info;
2037 memset(&dev_info, 0, sizeof(dev_info));
2038 eth_igb_infos_get(dev, &dev_info);
2040 mask = 0xFFFFFFFF >> (32 - dev_info.max_rx_queues);
2041 regval = E1000_READ_REG(hw, E1000_EIMS);
2042 E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
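/*
 * Example of the mask computation above: with dev_info.max_rx_queues
 * equal to 8,
 *
 *   mask = 0xFFFFFFFF >> (32 - 8) = 0x000000FF
 *
 * i.e. one EIMS bit per possible RX queue, so receive interrupts are
 * unmasked for every queue the hardware can expose.
 */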
2049 * It reads ICR and gets interrupt causes, check it and set a bit flag
2050 * to update link status.
2053 * Pointer to struct rte_eth_dev.
2056 * - On success, zero.
2057 * - On failure, a negative value.
2060 eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
2063 struct e1000_hw *hw =
2064 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2065 struct e1000_interrupt *intr =
2066 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2068 igb_intr_disable(hw);
2070 /* read-on-clear nic registers here */
2071 icr = E1000_READ_REG(hw, E1000_ICR);
2074 if (icr & E1000_ICR_LSC) {
2075 intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
2078 if (icr & E1000_ICR_VMMB)
2079 intr->flags |= E1000_FLAG_MAILBOX;
2085 * It executes link_update after knowing an interrupt is present.
2088 * Pointer to struct rte_eth_dev.
2091 * - On success, zero.
2092 * - On failure, a negative value.
2095 eth_igb_interrupt_action(struct rte_eth_dev *dev)
2097 struct e1000_hw *hw =
2098 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2099 struct e1000_interrupt *intr =
2100 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2101 uint32_t tctl, rctl;
2102 struct rte_eth_link link;
2105 if (intr->flags & E1000_FLAG_MAILBOX) {
2106 igb_pf_mbx_process(dev);
2107 intr->flags &= ~E1000_FLAG_MAILBOX;
2110 igb_intr_enable(dev);
2111 rte_intr_enable(&(dev->pci_dev->intr_handle));
2113 if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
2114 intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
2116 /* set get_link_status to check register later */
2117 hw->mac.get_link_status = 1;
2118 ret = eth_igb_link_update(dev, 0);
2120 /* check if link has changed */
2124 memset(&link, 0, sizeof(link));
2125 rte_igb_dev_atomic_read_link_status(dev, &link);
2126 if (link.link_status) {
2128 " Port %d: Link Up - speed %u Mbps - %s",
2130 (unsigned)link.link_speed,
2131 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
2132 "full-duplex" : "half-duplex");
2134 PMD_INIT_LOG(INFO, " Port %d: Link Down",
2135 dev->data->port_id);
2137 PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
2138 dev->pci_dev->addr.domain,
2139 dev->pci_dev->addr.bus,
2140 dev->pci_dev->addr.devid,
2141 dev->pci_dev->addr.function);
2142 tctl = E1000_READ_REG(hw, E1000_TCTL);
2143 rctl = E1000_READ_REG(hw, E1000_RCTL);
2144 if (link.link_status) {
2146 tctl |= E1000_TCTL_EN;
2147 rctl |= E1000_RCTL_EN;
2150 tctl &= ~E1000_TCTL_EN;
2151 rctl &= ~E1000_RCTL_EN;
2153 E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2154 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2155 E1000_WRITE_FLUSH(hw);
2156 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
2163 * Interrupt handler which shall be registered first.
2166 *  Pointer to interrupt handle.
2168 *  The address of parameter (struct rte_eth_dev *) registered before.
2174 eth_igb_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
2177 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2179 eth_igb_interrupt_get_status(dev);
2180 eth_igb_interrupt_action(dev);
2184 eth_igb_led_on(struct rte_eth_dev *dev)
2186 struct e1000_hw *hw;
2188 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2189 return (e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
2193 eth_igb_led_off(struct rte_eth_dev *dev)
2195 struct e1000_hw *hw;
2197 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2198 return (e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
2202 eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2204 struct e1000_hw *hw;
2209 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2210 fc_conf->pause_time = hw->fc.pause_time;
2211 fc_conf->high_water = hw->fc.high_water;
2212 fc_conf->low_water = hw->fc.low_water;
2213 fc_conf->send_xon = hw->fc.send_xon;
2214 fc_conf->autoneg = hw->mac.autoneg;
2217 * Return rx_pause and tx_pause status according to actual setting of
2218 * the TFCE and RFCE bits in the CTRL register.
2220 ctrl = E1000_READ_REG(hw, E1000_CTRL);
2221 if (ctrl & E1000_CTRL_TFCE)
2226 if (ctrl & E1000_CTRL_RFCE)
2231 if (rx_pause && tx_pause)
2232 fc_conf->mode = RTE_FC_FULL;
2234 fc_conf->mode = RTE_FC_RX_PAUSE;
2236 fc_conf->mode = RTE_FC_TX_PAUSE;
2238 fc_conf->mode = RTE_FC_NONE;
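/*
 * The decisions above map the (rx_pause, tx_pause) pair onto the
 * generic flow-control mode:
 *
 *   rx_pause  tx_pause  fc_conf->mode
 *      1         1      RTE_FC_FULL
 *      1         0      RTE_FC_RX_PAUSE
 *      0         1      RTE_FC_TX_PAUSE
 *      0         0      RTE_FC_NONE
 */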
2244 eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2246 struct e1000_hw *hw;
2248 enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
2254 uint32_t rx_buf_size;
2255 uint32_t max_high_water;
2258 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2259 if (fc_conf->autoneg != hw->mac.autoneg)
2261 rx_buf_size = igb_get_rx_buffer_size(hw);
2262 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2264 /* At least reserve one Ethernet frame for watermark */
2265 max_high_water = rx_buf_size - ETHER_MAX_LEN;
2266 if ((fc_conf->high_water > max_high_water) ||
2267 (fc_conf->high_water < fc_conf->low_water)) {
2268 PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
2269 PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water);
2273 hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
2274 hw->fc.pause_time = fc_conf->pause_time;
2275 hw->fc.high_water = fc_conf->high_water;
2276 hw->fc.low_water = fc_conf->low_water;
2277 hw->fc.send_xon = fc_conf->send_xon;
2279 err = e1000_setup_link_generic(hw);
2280 if (err == E1000_SUCCESS) {
2282 /* check if we want to forward MAC frames - driver doesn't have native
2283 * capability to do that, so we'll write the registers ourselves */
2285 rctl = E1000_READ_REG(hw, E1000_RCTL);
2287 /* set or clear MFLCN.PMCF bit depending on configuration */
2288 if (fc_conf->mac_ctrl_frame_fwd != 0)
2289 rctl |= E1000_RCTL_PMCF;
2291 rctl &= ~E1000_RCTL_PMCF;
2293 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2294 E1000_WRITE_FLUSH(hw);
2299 PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
2303 #define E1000_RAH_POOLSEL_SHIFT (18)
2305 eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
2306 uint32_t index, __rte_unused uint32_t pool)
2308 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2311 e1000_rar_set(hw, mac_addr->addr_bytes, index);
2312 rah = E1000_READ_REG(hw, E1000_RAH(index));
2313 rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool));
2314 E1000_WRITE_REG(hw, E1000_RAH(index), rah);
2318 eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
2320 uint8_t addr[ETHER_ADDR_LEN];
2321 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2323 memset(addr, 0, sizeof(addr));
2325 e1000_rar_set(hw, addr, index);
2329 eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
2330 struct ether_addr *addr)
2332 eth_igb_rar_clear(dev, 0);
2334 eth_igb_rar_set(dev, (void *)addr, 0, 0);
2337 * Virtual Function operations
2340 igbvf_intr_disable(struct e1000_hw *hw)
2342 PMD_INIT_FUNC_TRACE();
2344 /* Clear interrupt mask to stop from interrupts being generated */
2345 E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF);
2347 E1000_WRITE_FLUSH(hw);
2351 igbvf_stop_adapter(struct rte_eth_dev *dev)
2355 struct rte_eth_dev_info dev_info;
2356 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2358 memset(&dev_info, 0, sizeof(dev_info));
2359 eth_igbvf_infos_get(dev, &dev_info);
2361 /* Clear interrupt mask to stop from interrupts being generated */
2362 igbvf_intr_disable(hw);
2364 /* Clear any pending interrupts, flush previous writes */
2365 E1000_READ_REG(hw, E1000_EICR);
2367 /* Disable the transmit unit. Each queue must be disabled. */
2368 for (i = 0; i < dev_info.max_tx_queues; i++)
2369 E1000_WRITE_REG(hw, E1000_TXDCTL(i), E1000_TXDCTL_SWFLSH);
2371 /* Disable the receive unit by stopping each queue */
2372 for (i = 0; i < dev_info.max_rx_queues; i++) {
2373 reg_val = E1000_READ_REG(hw, E1000_RXDCTL(i));
2374 reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE;
2375 E1000_WRITE_REG(hw, E1000_RXDCTL(i), reg_val);
2376 while (E1000_READ_REG(hw, E1000_RXDCTL(i)) & E1000_RXDCTL_QUEUE_ENABLE)
2380	/* flush all queue disables */
2381 E1000_WRITE_FLUSH(hw);
2385 static int eth_igbvf_link_update(struct e1000_hw *hw)
2387 struct e1000_mbx_info *mbx = &hw->mbx;
2388 struct e1000_mac_info *mac = &hw->mac;
2389 int ret_val = E1000_SUCCESS;
2391 PMD_INIT_LOG(DEBUG, "e1000_check_for_link_vf");
2394	 * We only want to run this if there has been a reset asserted;
2395	 * in this case that could mean a link change, a device reset,
2396	 * or a virtual function reset.
2399	/* If we were hit with a reset or timeout, drop the link */
2400 if (!e1000_check_for_rst(hw, 0) || !mbx->timeout)
2401 mac->get_link_status = TRUE;
2403 if (!mac->get_link_status)
2406	/* if link status is down, there is no point in checking if the PF is up */
2407 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
2410 /* if we passed all the tests above then the link is up and we no
2411 * longer need to check for link */
2412 mac->get_link_status = FALSE;
2420 igbvf_dev_configure(struct rte_eth_dev *dev)
2422 struct rte_eth_conf* conf = &dev->data->dev_conf;
2424 PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
2425 dev->data->port_id);
2428 * VF has no ability to enable/disable HW CRC
2429 * Keep the persistent behavior the same as Host PF
2431 #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
2432 if (!conf->rxmode.hw_strip_crc) {
2433 PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip");
2434 conf->rxmode.hw_strip_crc = 1;
2437 if (conf->rxmode.hw_strip_crc) {
2438 PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip");
2439 conf->rxmode.hw_strip_crc = 0;
2447 igbvf_dev_start(struct rte_eth_dev *dev)
2449 struct e1000_hw *hw =
2450 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2451 struct e1000_adapter *adapter =
2452 E1000_DEV_PRIVATE(dev->data->dev_private);
2455 PMD_INIT_FUNC_TRACE();
2457 hw->mac.ops.reset_hw(hw);
2458 adapter->stopped = 0;
2461 igbvf_set_vfta_all(dev,1);
2463 eth_igbvf_tx_init(dev);
2465 /* This can fail when allocating mbufs for descriptor rings */
2466 ret = eth_igbvf_rx_init(dev);
2468 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
2469 igb_dev_clear_queues(dev);
2477 igbvf_dev_stop(struct rte_eth_dev *dev)
2479 PMD_INIT_FUNC_TRACE();
2481 igbvf_stop_adapter(dev);
2484	 * Clear what we set; we still keep shadow_vfta so it can be
2485	 * restored after the device starts
2487 igbvf_set_vfta_all(dev,0);
2489 igb_dev_clear_queues(dev);
2493 igbvf_dev_close(struct rte_eth_dev *dev)
2495 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2496 struct e1000_adapter *adapter =
2497 E1000_DEV_PRIVATE(dev->data->dev_private);
2499 PMD_INIT_FUNC_TRACE();
2503 igbvf_dev_stop(dev);
2504 adapter->stopped = 1;
2505 igb_dev_free_queues(dev);
2508 static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on)
2510 struct e1000_mbx_info *mbx = &hw->mbx;
2513	/* After setting a VLAN, VLAN stripping will also be enabled in the igb driver */
2514 msgbuf[0] = E1000_VF_SET_VLAN;
2516 /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
2518 msgbuf[0] |= E1000_VF_SET_VLAN_ADD;
2520 return (mbx->ops.write_posted(hw, msgbuf, 2, 0));
2523 static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on)
2525 struct e1000_hw *hw =
2526 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2527 struct e1000_vfta * shadow_vfta =
2528 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2529 int i = 0, j = 0, vfta = 0, mask = 1;
2531 for (i = 0; i < IGB_VFTA_SIZE; i++){
2532 vfta = shadow_vfta->vfta[i];
2535 for (j = 0; j < 32; j++){
2538 (uint16_t)((i<<5)+j), on);
2547 igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2549 struct e1000_hw *hw =
2550 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2551 struct e1000_vfta * shadow_vfta =
2552 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2553 uint32_t vid_idx = 0;
2554 uint32_t vid_bit = 0;
2557 PMD_INIT_FUNC_TRACE();
2559	/* vind is not used in the VF driver; set to 0 (see ixgbe_set_vfta_vf) */
2560 ret = igbvf_set_vfta(hw, vlan_id, !!on);
2562 PMD_INIT_LOG(ERR, "Unable to set VF vlan");
2565 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
2566 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
2568	/* Save what we set and restore it after device reset */
2570 shadow_vfta->vfta[vid_idx] |= vid_bit;
2572 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
2578 igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr)
2580 struct e1000_hw *hw =
2581 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2583 /* index is not used by rar_set() */
2584 hw->mac.ops.rar_set(hw, (void *)addr, 0);
2589 eth_igb_rss_reta_update(struct rte_eth_dev *dev,
2590 struct rte_eth_rss_reta_entry64 *reta_conf,
2595 uint16_t idx, shift;
2596 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2598 if (reta_size != ETH_RSS_RETA_SIZE_128) {
2599		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
2600			"(%d) doesn't match the number the hardware can support "
2601			"(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
2605 for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
2606 idx = i / RTE_RETA_GROUP_SIZE;
2607 shift = i % RTE_RETA_GROUP_SIZE;
2608 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2612 if (mask == IGB_4_BIT_MASK)
2615 r = E1000_READ_REG(hw, E1000_RETA(i >> 2));
2616 for (j = 0, reta = 0; j < IGB_4_BIT_WIDTH; j++) {
2617 if (mask & (0x1 << j))
2618 reta |= reta_conf[idx].reta[shift + j] <<
2621 reta |= r & (IGB_8_BIT_MASK << (CHAR_BIT * j));
2623 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta);
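/*
 * Usage sketch (application side, illustrative): the 128-entry table is
 * normally filled through the generic ethdev call, in groups of
 * RTE_RETA_GROUP_SIZE (64) entries. E.g. spreading flows over 4 queues:
 *
 *   struct rte_eth_rss_reta_entry64 reta_conf[2];
 *   int i;
 *
 *   memset(reta_conf, 0, sizeof(reta_conf));
 *   for (i = 0; i < ETH_RSS_RETA_SIZE_128; i++) {
 *       reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *           1ULL << (i % RTE_RETA_GROUP_SIZE);
 *       reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *           i % 4;
 *   }
 *   rte_eth_dev_rss_reta_update(port_id, reta_conf, ETH_RSS_RETA_SIZE_128);
 */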
2630 eth_igb_rss_reta_query(struct rte_eth_dev *dev,
2631 struct rte_eth_rss_reta_entry64 *reta_conf,
2636 uint16_t idx, shift;
2637 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2639 if (reta_size != ETH_RSS_RETA_SIZE_128) {
2640		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
2641			"(%d) doesn't match the number the hardware can support "
2642			"(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
2646 for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
2647 idx = i / RTE_RETA_GROUP_SIZE;
2648 shift = i % RTE_RETA_GROUP_SIZE;
2649 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2653 reta = E1000_READ_REG(hw, E1000_RETA(i >> 2));
2654 for (j = 0; j < IGB_4_BIT_WIDTH; j++) {
2655 if (mask & (0x1 << j))
2656 reta_conf[idx].reta[shift + j] =
2657 ((reta >> (CHAR_BIT * j)) &
2665 #define MAC_TYPE_FILTER_SUP(type) do {\
2666 if ((type) != e1000_82580 && (type) != e1000_i350 &&\
2667 (type) != e1000_82576)\
2672 eth_igb_syn_filter_set(struct rte_eth_dev *dev,
2673 struct rte_eth_syn_filter *filter,
2676 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2677 uint32_t synqf, rfctl;
2679 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
2682 synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
2685 if (synqf & E1000_SYN_FILTER_ENABLE)
2688 synqf = (uint32_t)(((filter->queue << E1000_SYN_FILTER_QUEUE_SHIFT) &
2689 E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE);
2691 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
2692 if (filter->hig_pri)
2693 rfctl |= E1000_RFCTL_SYNQFP;
2695 rfctl &= ~E1000_RFCTL_SYNQFP;
2697 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
2699 if (!(synqf & E1000_SYN_FILTER_ENABLE))
2704 E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
2705 E1000_WRITE_FLUSH(hw);
2710 eth_igb_syn_filter_get(struct rte_eth_dev *dev,
2711 struct rte_eth_syn_filter *filter)
2713 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2714 uint32_t synqf, rfctl;
2716 synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
2717 if (synqf & E1000_SYN_FILTER_ENABLE) {
2718 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
2719 filter->hig_pri = (rfctl & E1000_RFCTL_SYNQFP) ? 1 : 0;
2720 filter->queue = (uint8_t)((synqf & E1000_SYN_FILTER_QUEUE) >>
2721 E1000_SYN_FILTER_QUEUE_SHIFT);
2729 eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
2730 enum rte_filter_op filter_op,
2733 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2736 MAC_TYPE_FILTER_SUP(hw->mac.type);
2738 if (filter_op == RTE_ETH_FILTER_NOP)
2742 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
2747 switch (filter_op) {
2748 case RTE_ETH_FILTER_ADD:
2749 ret = eth_igb_syn_filter_set(dev,
2750 (struct rte_eth_syn_filter *)arg,
2753 case RTE_ETH_FILTER_DELETE:
2754 ret = eth_igb_syn_filter_set(dev,
2755 (struct rte_eth_syn_filter *)arg,
2758 case RTE_ETH_FILTER_GET:
2759 ret = eth_igb_syn_filter_get(dev,
2760 (struct rte_eth_syn_filter *)arg);
2763 PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
2771 #define MAC_TYPE_FILTER_SUP_EXT(type) do {\
2772 if ((type) != e1000_82580 && (type) != e1000_i350)\
2776 /* Translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info. */
2778 ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter,
2779 struct e1000_2tuple_filter_info *filter_info)
2781 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
2783 if (filter->priority > E1000_2TUPLE_MAX_PRI)
2784		return -EINVAL;  /* priority is out of range. */
2785 if (filter->tcp_flags > TCP_FLAG_ALL)
2786 return -EINVAL; /* flags is invalid. */
2788 switch (filter->dst_port_mask) {
2790 filter_info->dst_port_mask = 0;
2791 filter_info->dst_port = filter->dst_port;
2794 filter_info->dst_port_mask = 1;
2797 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
2801 switch (filter->proto_mask) {
2803 filter_info->proto_mask = 0;
2804 filter_info->proto = filter->proto;
2807 filter_info->proto_mask = 1;
2810 PMD_DRV_LOG(ERR, "invalid protocol mask.");
2814 filter_info->priority = (uint8_t)filter->priority;
2815 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
2816 filter_info->tcp_flags = filter->tcp_flags;
2818 filter_info->tcp_flags = 0;
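/*
 * Note on the mask convention translated above: in rte_eth_ntuple_filter
 * a field mask of UINT16_MAX/UINT8_MAX means "compare this field" and 0
 * means "ignore it", whereas the e1000 filter_info uses 0 for compare
 * and 1 for don't-compare; the switch statements perform that inversion.
 */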
2823 static inline struct e1000_2tuple_filter *
2824 igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list,
2825 struct e1000_2tuple_filter_info *key)
2827 struct e1000_2tuple_filter *it;
2829 TAILQ_FOREACH(it, filter_list, entries) {
2830 if (memcmp(key, &it->filter_info,
2831 sizeof(struct e1000_2tuple_filter_info)) == 0) {
2839 * igb_add_2tuple_filter - add a 2tuple filter
2842 * dev: Pointer to struct rte_eth_dev.
2843 * ntuple_filter: pointer to the filter that will be added.
2846 * - On success, zero.
2847 * - On failure, a negative value.
2850 igb_add_2tuple_filter(struct rte_eth_dev *dev,
2851 struct rte_eth_ntuple_filter *ntuple_filter)
2853 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2854 struct e1000_filter_info *filter_info =
2855 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2856 struct e1000_2tuple_filter *filter;
2857 uint32_t ttqf = E1000_TTQF_DISABLE_MASK;
2858 uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP;
2861 filter = rte_zmalloc("e1000_2tuple_filter",
2862 sizeof(struct e1000_2tuple_filter), 0);
2866 ret = ntuple_filter_to_2tuple(ntuple_filter,
2867 &filter->filter_info);
2872 if (igb_2tuple_filter_lookup(&filter_info->twotuple_list,
2873 &filter->filter_info) != NULL) {
2874 PMD_DRV_LOG(ERR, "filter exists.");
2878 filter->queue = ntuple_filter->queue;
2881 * look for an unused 2tuple filter index,
2882	 * and insert the filter into the list.
2884 for (i = 0; i < E1000_MAX_TTQF_FILTERS; i++) {
2885 if (!(filter_info->twotuple_mask & (1 << i))) {
2886 filter_info->twotuple_mask |= 1 << i;
2888 TAILQ_INSERT_TAIL(&filter_info->twotuple_list,
2894 if (i >= E1000_MAX_TTQF_FILTERS) {
2895 PMD_DRV_LOG(ERR, "2tuple filters are full.");
2900 imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
2901 if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
2902 imir |= E1000_IMIR_PORT_BP;
2904 imir &= ~E1000_IMIR_PORT_BP;
2906 imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
2908 ttqf |= E1000_TTQF_QUEUE_ENABLE;
2909 ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT);
2910 ttqf |= (uint32_t)(filter->filter_info.proto & E1000_TTQF_PROTOCOL_MASK);
2911 if (filter->filter_info.proto_mask == 0)
2912 ttqf &= ~E1000_TTQF_MASK_ENABLE;
2914 /* tcp flags bits setting. */
2915 if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
2916 if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
2917 imir_ext |= E1000_IMIREXT_CTRL_URG;
2918 if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
2919 imir_ext |= E1000_IMIREXT_CTRL_ACK;
2920 if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
2921 imir_ext |= E1000_IMIREXT_CTRL_PSH;
2922 if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
2923 imir_ext |= E1000_IMIREXT_CTRL_RST;
2924 if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
2925 imir_ext |= E1000_IMIREXT_CTRL_SYN;
2926 if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
2927 imir_ext |= E1000_IMIREXT_CTRL_FIN;
2929 imir_ext |= E1000_IMIREXT_CTRL_BP;
2930 E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
2931 E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf);
2932 E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
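/*
 * Summary of the programming above: each 2-tuple slot i is driven by
 * three registers - IMIR(i) carries the destination port and priority,
 * IMIREXT(i) the TCP-flag match controls (or the bypass bit when no
 * flags are requested), and TTQF(i) the protocol, target queue and
 * enable bits.
 */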
2937 * igb_remove_2tuple_filter - remove a 2tuple filter
2940 * dev: Pointer to struct rte_eth_dev.
2941 * ntuple_filter: pointer to the filter that will be removed.
2944 * - On success, zero.
2945 * - On failure, a negative value.
2948 igb_remove_2tuple_filter(struct rte_eth_dev *dev,
2949 struct rte_eth_ntuple_filter *ntuple_filter)
2951 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2952 struct e1000_filter_info *filter_info =
2953 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2954 struct e1000_2tuple_filter_info filter_2tuple;
2955 struct e1000_2tuple_filter *filter;
2958 memset(&filter_2tuple, 0, sizeof(struct e1000_2tuple_filter_info));
2959 ret = ntuple_filter_to_2tuple(ntuple_filter,
2964 filter = igb_2tuple_filter_lookup(&filter_info->twotuple_list,
2966 if (filter == NULL) {
2967 PMD_DRV_LOG(ERR, "filter doesn't exist.");
2971 filter_info->twotuple_mask &= ~(1 << filter->index);
2972 TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries);
2975 E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK);
2976 E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
2977 E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
2981 static inline struct e1000_flex_filter *
2982 eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list,
2983 struct e1000_flex_filter_info *key)
2985 struct e1000_flex_filter *it;
2987 TAILQ_FOREACH(it, filter_list, entries) {
2988 if (memcmp(key, &it->filter_info,
2989 sizeof(struct e1000_flex_filter_info)) == 0)
2997 eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
2998 struct rte_eth_flex_filter *filter,
3001 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3002 struct e1000_filter_info *filter_info =
3003 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3004 struct e1000_flex_filter *flex_filter, *it;
3005 uint32_t wufc, queueing, mask;
3007 uint8_t shift, i, j = 0;
3009 flex_filter = rte_zmalloc("e1000_flex_filter",
3010 sizeof(struct e1000_flex_filter), 0);
3011 if (flex_filter == NULL)
3014 flex_filter->filter_info.len = filter->len;
3015 flex_filter->filter_info.priority = filter->priority;
3016 memcpy(flex_filter->filter_info.dwords, filter->bytes, filter->len);
3017 for (i = 0; i < RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT; i++) {
3019 /* reverse bits in flex filter's mask*/
3020 for (shift = 0; shift < CHAR_BIT; shift++) {
3021 if (filter->mask[i] & (0x01 << shift))
3022 mask |= (0x80 >> shift);
3024 flex_filter->filter_info.mask[i] = mask;
3027 wufc = E1000_READ_REG(hw, E1000_WUFC);
3028 if (flex_filter->index < E1000_MAX_FHFT)
3029 reg_off = E1000_FHFT(flex_filter->index);
3031 reg_off = E1000_FHFT_EXT(flex_filter->index - E1000_MAX_FHFT);
3034 if (eth_igb_flex_filter_lookup(&filter_info->flex_list,
3035 &flex_filter->filter_info) != NULL) {
3036 PMD_DRV_LOG(ERR, "filter exists.");
3037 rte_free(flex_filter);
3040 flex_filter->queue = filter->queue;
3042 * look for an unused flex filter index
3043 * and insert the filter into the list.
3045 for (i = 0; i < E1000_MAX_FLEX_FILTERS; i++) {
3046 if (!(filter_info->flex_mask & (1 << i))) {
3047 filter_info->flex_mask |= 1 << i;
3048 flex_filter->index = i;
3049 TAILQ_INSERT_TAIL(&filter_info->flex_list,
3055 if (i >= E1000_MAX_FLEX_FILTERS) {
3056 PMD_DRV_LOG(ERR, "flex filters are full.");
3057 rte_free(flex_filter);
3061 E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
3062 (E1000_WUFC_FLX0 << flex_filter->index));
3063 queueing = filter->len |
3064 (filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
3065 (filter->priority << E1000_FHFT_QUEUEING_PRIO_SHIFT);
3066 E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET,
3068 for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) {
3069 E1000_WRITE_REG(hw, reg_off,
3070 flex_filter->filter_info.dwords[j]);
3071 reg_off += sizeof(uint32_t);
3072 E1000_WRITE_REG(hw, reg_off,
3073 flex_filter->filter_info.dwords[++j]);
3074 reg_off += sizeof(uint32_t);
3075 E1000_WRITE_REG(hw, reg_off,
3076 (uint32_t)flex_filter->filter_info.mask[i]);
3077 reg_off += sizeof(uint32_t) * 2;
3081 it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
3082 &flex_filter->filter_info);
3084 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3085 rte_free(flex_filter);
3089 for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
3090 E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);
3091 E1000_WRITE_REG(hw, E1000_WUFC, wufc &
3092 (~(E1000_WUFC_FLX0 << it->index)));
3094 filter_info->flex_mask &= ~(1 << it->index);
3095 TAILQ_REMOVE(&filter_info->flex_list, it, entries);
3097 rte_free(flex_filter);
3104 eth_igb_get_flex_filter(struct rte_eth_dev *dev,
3105 struct rte_eth_flex_filter *filter)
3107 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3108 struct e1000_filter_info *filter_info =
3109 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3110 struct e1000_flex_filter flex_filter, *it;
3111 uint32_t wufc, queueing, wufc_en = 0;
3113 memset(&flex_filter, 0, sizeof(struct e1000_flex_filter));
3114 flex_filter.filter_info.len = filter->len;
3115 flex_filter.filter_info.priority = filter->priority;
3116 memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len);
3117 memcpy(flex_filter.filter_info.mask, filter->mask,
3118 RTE_ALIGN(filter->len, sizeof(char)) / sizeof(char));
3120 it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
3121 &flex_filter.filter_info);
3123 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3127 wufc = E1000_READ_REG(hw, E1000_WUFC);
3128 wufc_en = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << it->index);
3130 if ((wufc & wufc_en) == wufc_en) {
3131 uint32_t reg_off = 0;
3132 if (it->index < E1000_MAX_FHFT)
3133 reg_off = E1000_FHFT(it->index);
3135 reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);
3137 queueing = E1000_READ_REG(hw,
3138 reg_off + E1000_FHFT_QUEUEING_OFFSET);
3139 filter->len = queueing & E1000_FHFT_QUEUEING_LEN;
3140 filter->priority = (queueing & E1000_FHFT_QUEUEING_PRIO) >>
3141 E1000_FHFT_QUEUEING_PRIO_SHIFT;
3142 filter->queue = (queueing & E1000_FHFT_QUEUEING_QUEUE) >>
3143 E1000_FHFT_QUEUEING_QUEUE_SHIFT;
3150 eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
3151 enum rte_filter_op filter_op,
3154 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3155 struct rte_eth_flex_filter *filter;
3158 MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
3160 if (filter_op == RTE_ETH_FILTER_NOP)
3164 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
3169 filter = (struct rte_eth_flex_filter *)arg;
3170 if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN
3171 || filter->len % sizeof(uint64_t) != 0) {
3172 PMD_DRV_LOG(ERR, "filter's length is out of range");
3175 if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
3176 PMD_DRV_LOG(ERR, "filter's priority is out of range");
3180 switch (filter_op) {
3181 case RTE_ETH_FILTER_ADD:
3182 ret = eth_igb_add_del_flex_filter(dev, filter, TRUE);
3184 case RTE_ETH_FILTER_DELETE:
3185 ret = eth_igb_add_del_flex_filter(dev, filter, FALSE);
3187 case RTE_ETH_FILTER_GET:
3188 ret = eth_igb_get_flex_filter(dev, filter);
3191 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
3199 /* Translate elements in struct rte_eth_ntuple_filter to struct e1000_5tuple_filter_info. */
3201 ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter,
3202 struct e1000_5tuple_filter_info *filter_info)
3204 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576)
3206 if (filter->priority > E1000_2TUPLE_MAX_PRI)
3207		return -EINVAL;  /* priority is out of range. */
3208 if (filter->tcp_flags > TCP_FLAG_ALL)
3209 return -EINVAL; /* flags is invalid. */
3211 switch (filter->dst_ip_mask) {
3213 filter_info->dst_ip_mask = 0;
3214 filter_info->dst_ip = filter->dst_ip;
3217 filter_info->dst_ip_mask = 1;
3220 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
3224 switch (filter->src_ip_mask) {
3226 filter_info->src_ip_mask = 0;
3227 filter_info->src_ip = filter->src_ip;
3230 filter_info->src_ip_mask = 1;
3233 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
3237 switch (filter->dst_port_mask) {
3239 filter_info->dst_port_mask = 0;
3240 filter_info->dst_port = filter->dst_port;
3243 filter_info->dst_port_mask = 1;
3246 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
3250 switch (filter->src_port_mask) {
3252 filter_info->src_port_mask = 0;
3253 filter_info->src_port = filter->src_port;
3256 filter_info->src_port_mask = 1;
3259 PMD_DRV_LOG(ERR, "invalid src_port mask.");
3263 switch (filter->proto_mask) {
3265 filter_info->proto_mask = 0;
3266 filter_info->proto = filter->proto;
3269 filter_info->proto_mask = 1;
3272 PMD_DRV_LOG(ERR, "invalid protocol mask.");
3276 filter_info->priority = (uint8_t)filter->priority;
3277 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
3278 filter_info->tcp_flags = filter->tcp_flags;
3280 filter_info->tcp_flags = 0;
3285 static inline struct e1000_5tuple_filter *
3286 igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list,
3287 struct e1000_5tuple_filter_info *key)
3289 struct e1000_5tuple_filter *it;
3291 TAILQ_FOREACH(it, filter_list, entries) {
3292 if (memcmp(key, &it->filter_info,
3293 sizeof(struct e1000_5tuple_filter_info)) == 0) {
3301 * igb_add_5tuple_filter_82576 - add a 5tuple filter
3304 * dev: Pointer to struct rte_eth_dev.
3305 * ntuple_filter: pointer to the filter that will be added.
3308 * - On success, zero.
3309 * - On failure, a negative value.
3312 igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
3313 struct rte_eth_ntuple_filter *ntuple_filter)
3315 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3316 struct e1000_filter_info *filter_info =
3317 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3318 struct e1000_5tuple_filter *filter;
3319 uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK;
3320 uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP;
3324 filter = rte_zmalloc("e1000_5tuple_filter",
3325 sizeof(struct e1000_5tuple_filter), 0);
3329 ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
3330 &filter->filter_info);
3336 if (igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
3337 &filter->filter_info) != NULL) {
3338 PMD_DRV_LOG(ERR, "filter exists.");
3342 filter->queue = ntuple_filter->queue;
3345 * look for an unused 5tuple filter index,
3346	 * and insert the filter into the list.
3348 for (i = 0; i < E1000_MAX_FTQF_FILTERS; i++) {
3349 if (!(filter_info->fivetuple_mask & (1 << i))) {
3350 filter_info->fivetuple_mask |= 1 << i;
3352 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
3358 if (i >= E1000_MAX_FTQF_FILTERS) {
3359 PMD_DRV_LOG(ERR, "5tuple filters are full.");
3364 ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK;
3365 if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. */
3366 ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP;
3367 if (filter->filter_info.dst_ip_mask == 0)
3368 ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP;
3369 if (filter->filter_info.src_port_mask == 0)
3370 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
3371 if (filter->filter_info.proto_mask == 0)
3372 ftqf &= ~E1000_FTQF_MASK_PROTO_BP;
3373 ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) &
3374 E1000_FTQF_QUEUE_MASK;
3375 ftqf |= E1000_FTQF_QUEUE_ENABLE;
3376 E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf);
3377 E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip);
3378 E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip);
3380 spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT;
3381 E1000_WRITE_REG(hw, E1000_SPQF(i), spqf);
3383 imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
3384 if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
3385 imir |= E1000_IMIR_PORT_BP;
3387 imir &= ~E1000_IMIR_PORT_BP;
3388 imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
3390 /* tcp flags bits setting. */
3391 if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
3392 if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
3393 imir_ext |= E1000_IMIREXT_CTRL_URG;
3394 if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
3395 imir_ext |= E1000_IMIREXT_CTRL_ACK;
3396 if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
3397 imir_ext |= E1000_IMIREXT_CTRL_PSH;
3398 if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
3399 imir_ext |= E1000_IMIREXT_CTRL_RST;
3400 if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
3401 imir_ext |= E1000_IMIREXT_CTRL_SYN;
3402 if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
3403 imir_ext |= E1000_IMIREXT_CTRL_FIN;
3405 imir_ext |= E1000_IMIREXT_CTRL_BP;
3406 E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
3407 E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
3412 * igb_remove_5tuple_filter_82576 - remove a 5tuple filter
3415 * dev: Pointer to struct rte_eth_dev.
3416 * ntuple_filter: pointer to the filter that will be removed.
3419 * - On success, zero.
3420 * - On failure, a negative value.
3423 igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
3424 struct rte_eth_ntuple_filter *ntuple_filter)
3426 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3427 struct e1000_filter_info *filter_info =
3428 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3429 struct e1000_5tuple_filter_info filter_5tuple;
3430 struct e1000_5tuple_filter *filter;
3433 memset(&filter_5tuple, 0, sizeof(struct e1000_5tuple_filter_info));
3434 ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
3439 filter = igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
3441 if (filter == NULL) {
3442 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3446 filter_info->fivetuple_mask &= ~(1 << filter->index);
3447 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
3450 E1000_WRITE_REG(hw, E1000_FTQF(filter->index),
3451 E1000_FTQF_VF_BP | E1000_FTQF_MASK);
3452 E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0);
3453 E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0);
3454 E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0);
3455 E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
3456 E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
3461 eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3464 struct e1000_hw *hw;
3465 struct rte_eth_dev_info dev_info;
3466 uint32_t frame_size = mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN +
3469 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3471 #ifdef RTE_LIBRTE_82571_SUPPORT
3472 /* XXX: not bigger than max_rx_pktlen */
3473 if (hw->mac.type == e1000_82571)
3476 eth_igb_infos_get(dev, &dev_info);
3478 /* check that mtu is within the allowed range */
3479 if ((mtu < ETHER_MIN_MTU) ||
3480 (frame_size > dev_info.max_rx_pktlen))
3483	/* refuse an mtu that requires the support of scattered packets when
3484	 * this feature has not been enabled before. */
3485 if (!dev->data->scattered_rx &&
3486 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
3489 rctl = E1000_READ_REG(hw, E1000_RCTL);
3491 /* switch to jumbo mode if needed */
3492 if (frame_size > ETHER_MAX_LEN) {
3493 dev->data->dev_conf.rxmode.jumbo_frame = 1;
3494 rctl |= E1000_RCTL_LPE;
3496 dev->data->dev_conf.rxmode.jumbo_frame = 0;
3497 rctl &= ~E1000_RCTL_LPE;
3499 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
3501 /* update max frame size */
3502 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
3504 E1000_WRITE_REG(hw, E1000_RLPML,
3505 dev->data->dev_conf.rxmode.max_rx_pkt_len);
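/*
 * Worked example (assuming the elided addend on the frame_size line is
 * VLAN_TAG_SIZE, as in the VLAN paths above): mtu = 1500 gives
 * frame_size = 1500 + 14 + 4 + 4 = 1522, which already exceeds
 * ETHER_MAX_LEN (1518), so E1000_RCTL_LPE stays set even for the
 * standard MTU once the VLAN allowance is counted in.
 */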
3511 * igb_add_del_ntuple_filter - add or delete a ntuple filter
3514 * dev: Pointer to struct rte_eth_dev.
3515 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
3516 * add: if true, add the filter; if false, remove the filter.
3519 * - On success, zero.
3520 * - On failure, a negative value.
3523 igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
3524 struct rte_eth_ntuple_filter *ntuple_filter,
3527 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3530 switch (ntuple_filter->flags) {
3531 case RTE_5TUPLE_FLAGS:
3532 case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
3533 if (hw->mac.type != e1000_82576)
3536 ret = igb_add_5tuple_filter_82576(dev,
3539 ret = igb_remove_5tuple_filter_82576(dev,
3542 case RTE_2TUPLE_FLAGS:
3543 case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
3544 if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
3547 ret = igb_add_2tuple_filter(dev, ntuple_filter);
3549 ret = igb_remove_2tuple_filter(dev, ntuple_filter);
3560 * igb_get_ntuple_filter - get a ntuple filter
3563 * dev: Pointer to struct rte_eth_dev.
3564 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
3567 * - On success, zero.
3568 * - On failure, a negative value.
3571 igb_get_ntuple_filter(struct rte_eth_dev *dev,
3572 struct rte_eth_ntuple_filter *ntuple_filter)
3574 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3575 struct e1000_filter_info *filter_info =
3576 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3577 struct e1000_5tuple_filter_info filter_5tuple;
3578 struct e1000_2tuple_filter_info filter_2tuple;
3579 struct e1000_5tuple_filter *p_5tuple_filter;
3580 struct e1000_2tuple_filter *p_2tuple_filter;
3583 switch (ntuple_filter->flags) {
3584 case RTE_5TUPLE_FLAGS:
3585 case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
3586 if (hw->mac.type != e1000_82576)
3588 memset(&filter_5tuple,
3590 sizeof(struct e1000_5tuple_filter_info));
3591 ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
3595 p_5tuple_filter = igb_5tuple_filter_lookup_82576(
3596 &filter_info->fivetuple_list,
3598 if (p_5tuple_filter == NULL) {
3599 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3602 ntuple_filter->queue = p_5tuple_filter->queue;
3604 case RTE_2TUPLE_FLAGS:
3605 case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
3606 if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
3608 memset(&filter_2tuple,
3610 sizeof(struct e1000_2tuple_filter_info));
3611 ret = ntuple_filter_to_2tuple(ntuple_filter, &filter_2tuple);
3614 p_2tuple_filter = igb_2tuple_filter_lookup(
3615 &filter_info->twotuple_list,
3617 if (p_2tuple_filter == NULL) {
3618 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3621 ntuple_filter->queue = p_2tuple_filter->queue;
3632 * igb_ntuple_filter_handle - Handle operations for ntuple filter.
3633 * @dev: pointer to rte_eth_dev structure
3634 * @filter_op: operation to be taken.
3635 * @arg: a pointer to the specific structure corresponding to filter_op
3638 igb_ntuple_filter_handle(struct rte_eth_dev *dev,
3639 enum rte_filter_op filter_op,
3642 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3645 MAC_TYPE_FILTER_SUP(hw->mac.type);
3647 if (filter_op == RTE_ETH_FILTER_NOP)
3651 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
3656 switch (filter_op) {
3657 case RTE_ETH_FILTER_ADD:
3658 ret = igb_add_del_ntuple_filter(dev,
3659 (struct rte_eth_ntuple_filter *)arg,
3662 case RTE_ETH_FILTER_DELETE:
3663 ret = igb_add_del_ntuple_filter(dev,
3664 (struct rte_eth_ntuple_filter *)arg,
3667 case RTE_ETH_FILTER_GET:
3668 ret = igb_get_ntuple_filter(dev,
3669 (struct rte_eth_ntuple_filter *)arg);
3672 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
3680 igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info,
3685 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
3686 if (filter_info->ethertype_filters[i] == ethertype &&
3687 (filter_info->ethertype_mask & (1 << i)))
3694 igb_ethertype_filter_insert(struct e1000_filter_info *filter_info,
3699 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
3700 if (!(filter_info->ethertype_mask & (1 << i))) {
3701 filter_info->ethertype_mask |= 1 << i;
3702 filter_info->ethertype_filters[i] = ethertype;
3710 igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
3713 if (idx >= E1000_MAX_ETQF_FILTERS)
3715 filter_info->ethertype_mask &= ~(1 << idx);
3716 filter_info->ethertype_filters[idx] = 0;
3722 igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
3723 struct rte_eth_ethertype_filter *filter,
3726 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3727 struct e1000_filter_info *filter_info =
3728 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3732 if (filter->ether_type == ETHER_TYPE_IPv4 ||
3733 filter->ether_type == ETHER_TYPE_IPv6) {
3734		PMD_DRV_LOG(ERR, "unsupported ether_type (0x%04x) in"
3735 " ethertype filter.", filter->ether_type);
3739 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
3740 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
3743 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
3744 PMD_DRV_LOG(ERR, "drop option is unsupported.");
3748 ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
3749 if (ret >= 0 && add) {
3750 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
3751 filter->ether_type);
3754 if (ret < 0 && !add) {
3755 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
3756 filter->ether_type);
3761 ret = igb_ethertype_filter_insert(filter_info,
3762 filter->ether_type);
3764 PMD_DRV_LOG(ERR, "ethertype filters are full.");
3768 etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
3769 etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE);
3770 etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT;
3772 ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret);
3776 E1000_WRITE_REG(hw, E1000_ETQF(ret), etqf);
3777 E1000_WRITE_FLUSH(hw);
3783 igb_get_ethertype_filter(struct rte_eth_dev *dev,
3784 struct rte_eth_ethertype_filter *filter)
3786 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3787 struct e1000_filter_info *filter_info =
3788 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3792 ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
3794 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
3795 filter->ether_type);
3799 etqf = E1000_READ_REG(hw, E1000_ETQF(ret));
3800 if (etqf & E1000_ETQF_FILTER_ENABLE) {
3801 filter->ether_type = etqf & E1000_ETQF_ETHERTYPE;
3803 filter->queue = (etqf & E1000_ETQF_QUEUE) >>
3804 E1000_ETQF_QUEUE_SHIFT;
3812 * igb_ethertype_filter_handle - Handle operations for ethertype filter.
3813 * @dev: pointer to rte_eth_dev structure
3814 * @filter_op: operation to be taken.
3815 * @arg: a pointer to the specific structure corresponding to filter_op
3818 igb_ethertype_filter_handle(struct rte_eth_dev *dev,
3819 enum rte_filter_op filter_op,
3822 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3825 MAC_TYPE_FILTER_SUP(hw->mac.type);
3827 if (filter_op == RTE_ETH_FILTER_NOP)
3831 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
3836 switch (filter_op) {
3837 case RTE_ETH_FILTER_ADD:
3838 ret = igb_add_del_ethertype_filter(dev,
3839 (struct rte_eth_ethertype_filter *)arg,
3842 case RTE_ETH_FILTER_DELETE:
3843 ret = igb_add_del_ethertype_filter(dev,
3844 (struct rte_eth_ethertype_filter *)arg,
3847 case RTE_ETH_FILTER_GET:
3848 ret = igb_get_ethertype_filter(dev,
3849 (struct rte_eth_ethertype_filter *)arg);
3852 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
3860 eth_igb_filter_ctrl(struct rte_eth_dev *dev,
3861 enum rte_filter_type filter_type,
3862 enum rte_filter_op filter_op,
3867 switch (filter_type) {
3868 case RTE_ETH_FILTER_NTUPLE:
3869 ret = igb_ntuple_filter_handle(dev, filter_op, arg);
3871 case RTE_ETH_FILTER_ETHERTYPE:
3872 ret = igb_ethertype_filter_handle(dev, filter_op, arg);
3874 case RTE_ETH_FILTER_SYN:
3875 ret = eth_igb_syn_filter_handle(dev, filter_op, arg);
3877 case RTE_ETH_FILTER_FLEXIBLE:
3878 ret = eth_igb_flex_filter_handle(dev, filter_op, arg);
3881 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3890 eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
3891 struct ether_addr *mc_addr_set,
3892 uint32_t nb_mc_addr)
3894 struct e1000_hw *hw;
3896 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3897 e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
3902 igb_timesync_enable(struct rte_eth_dev *dev)
3904 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3907 /* Start incrementing the register used to timestamp PTP packets. */
3908 E1000_WRITE_REG(hw, E1000_TIMINCA, E1000_TIMINCA_INIT);
3910 /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
3911 E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588),
3913 E1000_ETQF_FILTER_ENABLE |
3916 /* Enable timestamping of received PTP packets. */
3917 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
3918 tsync_ctl |= E1000_TSYNCRXCTL_ENABLED;
3919 E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);
3921 /* Enable Timestamping of transmitted PTP packets. */
3922 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
3923 tsync_ctl |= E1000_TSYNCTXCTL_ENABLED;
3924 E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);
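/*
 * Note: the ETQF slot reserved by E1000_ETQF_FILTER_1588 matches the
 * IEEE 1588 L2 ethertype (0x88F7), so PTP event frames are recognized
 * and timestamped independently of the user-configured ethertype
 * filters handled earlier in this file.
 */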
3930 igb_timesync_disable(struct rte_eth_dev *dev)
3932 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3935 /* Disable timestamping of transmitted PTP packets. */
3936 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
3937 tsync_ctl &= ~E1000_TSYNCTXCTL_ENABLED;
3938 E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);
3940 /* Disable timestamping of received PTP packets. */
3941 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
3942 tsync_ctl &= ~E1000_TSYNCRXCTL_ENABLED;
3943 E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);
3945 /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
3946 E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), 0);
3948	/* Stop incrementing the System Time registers. */
3949 E1000_WRITE_REG(hw, E1000_TIMINCA, 0);
3955 igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
3956 struct timespec *timestamp,
3957 uint32_t flags __rte_unused)
3959 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3960 uint32_t tsync_rxctl;
3964 tsync_rxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
3965 if ((tsync_rxctl & E1000_TSYNCRXCTL_VALID) == 0)
3968 rx_stmpl = E1000_READ_REG(hw, E1000_RXSTMPL);
3969 rx_stmph = E1000_READ_REG(hw, E1000_RXSTMPH);
3971 timestamp->tv_sec = (uint64_t)(((uint64_t)rx_stmph << 32) | rx_stmpl);
3972 timestamp->tv_nsec = 0;
3978 igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
3979 struct timespec *timestamp)
3981 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3982 uint32_t tsync_txctl;
3986 tsync_txctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
3987 if ((tsync_txctl & E1000_TSYNCTXCTL_VALID) == 0)
3990 tx_stmpl = E1000_READ_REG(hw, E1000_TXSTMPL);
3991 tx_stmph = E1000_READ_REG(hw, E1000_TXSTMPH);
3993 timestamp->tv_sec = (uint64_t)(((uint64_t)tx_stmph << 32) | tx_stmpl);
3994 timestamp->tv_nsec = 0;
4000 eth_igb_get_reg_length(struct rte_eth_dev *dev __rte_unused)
4004 const struct reg_info *reg_group;
4006 while ((reg_group = igb_regs[g_ind++]))
4007 count += igb_reg_group_count(reg_group);
4013 igbvf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
4017 const struct reg_info *reg_group;
4019 while ((reg_group = igbvf_regs[g_ind++]))
4020 count += igb_reg_group_count(reg_group);
4026 eth_igb_get_regs(struct rte_eth_dev *dev,
4027 struct rte_dev_reg_info *regs)
4029 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4030 uint32_t *data = regs->data;
4033 const struct reg_info *reg_group;
4035 /* Support only full register dump */
4036 if ((regs->length == 0) ||
4037 (regs->length == (uint32_t)eth_igb_get_reg_length(dev))) {
4038 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
4040 while ((reg_group = igb_regs[g_ind++]))
4041 count += igb_read_regs_group(dev, &data[count],
4050 igbvf_get_regs(struct rte_eth_dev *dev,
4051 struct rte_dev_reg_info *regs)
4053 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4054 uint32_t *data = regs->data;
4057 const struct reg_info *reg_group;
4059 /* Support only full register dump */
4060 if ((regs->length == 0) ||
4061 (regs->length == (uint32_t)igbvf_get_reg_length(dev))) {
4062 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
4064 while ((reg_group = igbvf_regs[g_ind++]))
4065 count += igb_read_regs_group(dev, &data[count],
4074 eth_igb_get_eeprom_length(struct rte_eth_dev *dev)
4076 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4078 /* Return unit is byte count */
4079 return hw->nvm.word_size * 2;
4083 eth_igb_get_eeprom(struct rte_eth_dev *dev,
4084 struct rte_dev_eeprom_info *in_eeprom)
4086 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4087 struct e1000_nvm_info *nvm = &hw->nvm;
4088 uint16_t *data = in_eeprom->data;
4091 first = in_eeprom->offset >> 1;
4092 length = in_eeprom->length >> 1;
4093 if ((first >= hw->nvm.word_size) ||
4094 ((first + length) >= hw->nvm.word_size))
4097 in_eeprom->magic = hw->vendor_id |
4098 ((uint32_t)hw->device_id << 16);
4100 if ((nvm->ops.read) == NULL)
4103 return nvm->ops.read(hw, first, length, data);
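/*
 * Note on the '>> 1' conversions above: callers pass offset/length in
 * bytes, but the NVM is addressed in 16-bit words. E.g. reading 32
 * bytes at byte offset 64 becomes first = 32, length = 16 words.
 */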
4107 eth_igb_set_eeprom(struct rte_eth_dev *dev,
4108 struct rte_dev_eeprom_info *in_eeprom)
4110 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4111 struct e1000_nvm_info *nvm = &hw->nvm;
4112 uint16_t *data = in_eeprom->data;
4115 first = in_eeprom->offset >> 1;
4116 length = in_eeprom->length >> 1;
4117 if ((first >= hw->nvm.word_size) ||
4118 ((first + length) >= hw->nvm.word_size))
4121 in_eeprom->magic = (uint32_t)hw->vendor_id |
4122 ((uint32_t)hw->device_id << 16);
4124 if ((nvm->ops.write) == NULL)
4126 return nvm->ops.write(hw, first, length, data);
4129 static struct rte_driver pmd_igb_drv = {
4131 .init = rte_igb_pmd_init,
4134 static struct rte_driver pmd_igbvf_drv = {
4136 .init = rte_igbvf_pmd_init,
4141 eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
4143 struct e1000_hw *hw =
4144 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4145 uint32_t mask = 1 << queue_id;
4147 E1000_WRITE_REG(hw, E1000_EIMC, mask);
4148 E1000_WRITE_FLUSH(hw);
4154 eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
4156 struct e1000_hw *hw =
4157 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4158 uint32_t mask = 1 << queue_id;
4161 regval = E1000_READ_REG(hw, E1000_EIMS);
4162 E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
4163 E1000_WRITE_FLUSH(hw);
4165 rte_intr_enable(&dev->pci_dev->intr_handle);
4171 eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
4172 uint8_t index, uint8_t offset)
4174 uint32_t val = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
4177 val &= ~((uint32_t)0xFF << offset);
4179 /* write vector and valid bit */
4180 val |= (msix_vector | E1000_IVAR_VALID) << offset;
4182 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, val);
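/*
 * Addressing example for the 82576 caller below (assuming the elided
 * addend is 8 * direction): RX queue 9 maps to index = 9 & 0x7 = 1 and
 * offset = ((9 & 0x8) << 1) + 0 = 16, i.e. its vector occupies the
 * third byte of IVAR word 1.
 */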
4186 eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
4187 uint8_t queue, uint8_t msix_vector)
4191 if (hw->mac.type == e1000_82575) {
4193 tmp = E1000_EICR_RX_QUEUE0 << queue;
4194 else if (direction == 1)
4195 tmp = E1000_EICR_TX_QUEUE0 << queue;
4196 E1000_WRITE_REG(hw, E1000_MSIXBM(msix_vector), tmp);
4197 } else if (hw->mac.type == e1000_82576) {
4198 if ((direction == 0) || (direction == 1))
4199 eth_igb_write_ivar(hw, msix_vector, queue & 0x7,
4200 ((queue & 0x8) << 1) +
4202 } else if ((hw->mac.type == e1000_82580) ||
4203 (hw->mac.type == e1000_i350) ||
4204 (hw->mac.type == e1000_i354) ||
4205 (hw->mac.type == e1000_i210) ||
4206 (hw->mac.type == e1000_i211)) {
4207 if ((direction == 0) || (direction == 1))
4208 eth_igb_write_ivar(hw, msix_vector,
4210 ((queue & 0x1) << 4) +
4216 /* Sets up the hardware to generate MSI-X interrupts properly
4218 * @dev: board private structure
4221 eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
4225 uint32_t tmpval, regval, intr_mask;
4226 struct e1000_hw *hw =
4227 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4230 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
4232	/* won't configure the MSI-X register if no mapping is done
4233	 * between the interrupt vector and the event fd
4235 if (!rte_intr_dp_is_en(intr_handle))
4239 /* set interrupt vector for other causes */
4240 if (hw->mac.type == e1000_82575) {
4241 tmpval = E1000_READ_REG(hw, E1000_CTRL_EXT);
4242 /* enable MSI-X PBA support */
4243 tmpval |= E1000_CTRL_EXT_PBA_CLR;
4245 /* Auto-Mask interrupts upon ICR read */
4246 tmpval |= E1000_CTRL_EXT_EIAME;
4247 tmpval |= E1000_CTRL_EXT_IRCA;
4249 E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmpval);
4251 /* enable msix_other interrupt */
4252 E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), 0, E1000_EIMS_OTHER);
4253 regval = E1000_READ_REG(hw, E1000_EIAC);
4254 E1000_WRITE_REG(hw, E1000_EIAC, regval | E1000_EIMS_OTHER);
4255 regval = E1000_READ_REG(hw, E1000_EIAM);
4256 E1000_WRITE_REG(hw, E1000_EIMS, regval | E1000_EIMS_OTHER);
4257 } else if ((hw->mac.type == e1000_82576) ||
4258 (hw->mac.type == e1000_82580) ||
4259 (hw->mac.type == e1000_i350) ||
4260 (hw->mac.type == e1000_i354) ||
4261 (hw->mac.type == e1000_i210) ||
4262 (hw->mac.type == e1000_i211)) {
4263 /* turn on MSI-X capability first */
4264 E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE |
4265 E1000_GPIE_PBA | E1000_GPIE_EIAME |
4268 intr_mask = (1 << intr_handle->max_intr) - 1;
4269 regval = E1000_READ_REG(hw, E1000_EIAC);
4270 E1000_WRITE_REG(hw, E1000_EIAC, regval | intr_mask);
4272 /* enable msix_other interrupt */
4273 regval = E1000_READ_REG(hw, E1000_EIMS);
4274 E1000_WRITE_REG(hw, E1000_EIMS, regval | intr_mask);
4275 tmpval = (dev->data->nb_rx_queues | E1000_IVAR_VALID) << 8;
4276 E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmpval);
4279 /* use EIAM to auto-mask when MSI-X interrupt
4280 * is asserted, this saves a register write for every interrupt
4282 intr_mask = (1 << intr_handle->nb_efd) - 1;
4283 regval = E1000_READ_REG(hw, E1000_EIAM);
4284 E1000_WRITE_REG(hw, E1000_EIAM, regval | intr_mask);
4286 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
4287 eth_igb_assign_msix_vector(hw, 0, queue_id, vec);
4288 intr_handle->intr_vec[queue_id] = vec;
4289 if (vec < intr_handle->nb_efd - 1)
4293 E1000_WRITE_FLUSH(hw);
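/*
 * Note on the assignment loop above: 'vec' stops incrementing once it
 * reaches nb_efd - 1, so with fewer event fds than RX queues the
 * remaining queues all share the last MSI-X vector; with nb_efd >=
 * nb_rx_queues the mapping is one vector per queue.
 */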
4297 PMD_REGISTER_DRIVER(pmd_igb_drv);
4298 PMD_REGISTER_DRIVER(pmd_igbvf_drv);