4 * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
40 #include <rte_common.h>
41 #include <rte_interrupts.h>
42 #include <rte_byteorder.h>
44 #include <rte_debug.h>
46 #include <rte_ether.h>
47 #include <rte_ethdev.h>
48 #include <rte_memory.h>
49 #include <rte_memzone.h>
51 #include <rte_atomic.h>
52 #include <rte_malloc.h>
55 #include "e1000_logs.h"
56 #include "base/e1000_api.h"
57 #include "e1000_ethdev.h"
61 * Default values for port configuration
63 #define IGB_DEFAULT_RX_FREE_THRESH 32
64 #define IGB_DEFAULT_RX_PTHRESH 8
65 #define IGB_DEFAULT_RX_HTHRESH 8
66 #define IGB_DEFAULT_RX_WTHRESH 0
68 #define IGB_DEFAULT_TX_PTHRESH 32
69 #define IGB_DEFAULT_TX_HTHRESH 0
70 #define IGB_DEFAULT_TX_WTHRESH 0
72 #define IGB_HKEY_MAX_INDEX 10
74 /* Bit shift and mask */
75 #define IGB_4_BIT_WIDTH (CHAR_BIT / 2)
76 #define IGB_4_BIT_MASK RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
77 #define IGB_8_BIT_WIDTH CHAR_BIT
78 #define IGB_8_BIT_MASK UINT8_MAX
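/*
 * Added note (not in the original source): with CHAR_BIT == 8,
 * IGB_4_BIT_WIDTH is 4 and IGB_4_BIT_MASK expands to 0x0f via
 * RTE_LEN2MASK(4, uint8_t), while IGB_8_BIT_MASK is 0xff. These masks are
 * typically used to pack and unpack nibble- and byte-wide register fields,
 * e.g. RSS redirection table entries.
 */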
80 /* Additional timesync values. */
81 #define E1000_ETQF_FILTER_1588 3
82 #define E1000_TIMINCA_INCVALUE 16000000
83 #define E1000_TIMINCA_INIT ((0x02 << E1000_TIMINCA_16NS_SHIFT) \
84 | E1000_TIMINCA_INCVALUE)
85 #define E1000_TSAUXC_DISABLE_SYSTIME 0x80000000
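/*
 * Added sketch, assuming the usual TIMINCA register layout: the value 0x02
 * shifted by E1000_TIMINCA_16NS_SHIFT selects the increment period of the
 * SYSTIM clock, and the low bits carry E1000_TIMINCA_INCVALUE (16000000)
 * as the amount added to SYSTIM on each increment.
 */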
87 static int eth_igb_configure(struct rte_eth_dev *dev);
88 static int eth_igb_start(struct rte_eth_dev *dev);
89 static void eth_igb_stop(struct rte_eth_dev *dev);
90 static void eth_igb_close(struct rte_eth_dev *dev);
91 static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
92 static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
93 static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
94 static void eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
95 static int eth_igb_link_update(struct rte_eth_dev *dev,
96 int wait_to_complete);
97 static void eth_igb_stats_get(struct rte_eth_dev *dev,
98 struct rte_eth_stats *rte_stats);
99 static void eth_igb_stats_reset(struct rte_eth_dev *dev);
100 static void eth_igb_infos_get(struct rte_eth_dev *dev,
101 struct rte_eth_dev_info *dev_info);
102 static void eth_igbvf_infos_get(struct rte_eth_dev *dev,
103 struct rte_eth_dev_info *dev_info);
104 static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
105 struct rte_eth_fc_conf *fc_conf);
106 static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
107 struct rte_eth_fc_conf *fc_conf);
108 static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
109 static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
110 static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
111 static int eth_igb_interrupt_action(struct rte_eth_dev *dev);
112 static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
114 static int igb_hardware_init(struct e1000_hw *hw);
115 static void igb_hw_control_acquire(struct e1000_hw *hw);
116 static void igb_hw_control_release(struct e1000_hw *hw);
117 static void igb_init_manageability(struct e1000_hw *hw);
118 static void igb_release_manageability(struct e1000_hw *hw);
120 static int eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
122 static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
123 uint16_t vlan_id, int on);
124 static void eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
125 static void eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);
127 static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
128 static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
129 static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev);
130 static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev);
131 static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev);
132 static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev);
134 static int eth_igb_led_on(struct rte_eth_dev *dev);
135 static int eth_igb_led_off(struct rte_eth_dev *dev);
137 static void igb_intr_disable(struct e1000_hw *hw);
138 static int igb_get_rx_buffer_size(struct e1000_hw *hw);
139 static void eth_igb_rar_set(struct rte_eth_dev *dev,
140 struct ether_addr *mac_addr,
141 uint32_t index, uint32_t pool);
142 static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
143 static void eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
144 struct ether_addr *addr);
146 static void igbvf_intr_disable(struct e1000_hw *hw);
147 static int igbvf_dev_configure(struct rte_eth_dev *dev);
148 static int igbvf_dev_start(struct rte_eth_dev *dev);
149 static void igbvf_dev_stop(struct rte_eth_dev *dev);
150 static void igbvf_dev_close(struct rte_eth_dev *dev);
151 static int eth_igbvf_link_update(struct e1000_hw *hw);
152 static void eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats);
153 static void eth_igbvf_stats_reset(struct rte_eth_dev *dev);
154 static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
155 uint16_t vlan_id, int on);
156 static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
157 static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
158 static void igbvf_default_mac_addr_set(struct rte_eth_dev *dev,
159 struct ether_addr *addr);
160 static int igbvf_get_reg_length(struct rte_eth_dev *dev);
161 static int igbvf_get_regs(struct rte_eth_dev *dev,
162 struct rte_dev_reg_info *regs);
164 static int eth_igb_rss_reta_update(struct rte_eth_dev *dev,
165 struct rte_eth_rss_reta_entry64 *reta_conf,
167 static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
168 struct rte_eth_rss_reta_entry64 *reta_conf,
171 static int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
172 struct rte_eth_syn_filter *filter,
174 static int eth_igb_syn_filter_get(struct rte_eth_dev *dev,
175 struct rte_eth_syn_filter *filter);
176 static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
177 enum rte_filter_op filter_op,
179 static int igb_add_2tuple_filter(struct rte_eth_dev *dev,
180 struct rte_eth_ntuple_filter *ntuple_filter);
181 static int igb_remove_2tuple_filter(struct rte_eth_dev *dev,
182 struct rte_eth_ntuple_filter *ntuple_filter);
183 static int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
184 struct rte_eth_flex_filter *filter,
186 static int eth_igb_get_flex_filter(struct rte_eth_dev *dev,
187 struct rte_eth_flex_filter *filter);
188 static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
189 enum rte_filter_op filter_op,
191 static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
192 struct rte_eth_ntuple_filter *ntuple_filter);
193 static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
194 struct rte_eth_ntuple_filter *ntuple_filter);
195 static int igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
196 struct rte_eth_ntuple_filter *filter,
198 static int igb_get_ntuple_filter(struct rte_eth_dev *dev,
199 struct rte_eth_ntuple_filter *filter);
200 static int igb_ntuple_filter_handle(struct rte_eth_dev *dev,
201 enum rte_filter_op filter_op,
203 static int igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
204 struct rte_eth_ethertype_filter *filter,
206 static int igb_ethertype_filter_handle(struct rte_eth_dev *dev,
207 enum rte_filter_op filter_op,
209 static int igb_get_ethertype_filter(struct rte_eth_dev *dev,
210 struct rte_eth_ethertype_filter *filter);
211 static int eth_igb_filter_ctrl(struct rte_eth_dev *dev,
212 enum rte_filter_type filter_type,
213 enum rte_filter_op filter_op,
215 static int eth_igb_get_reg_length(struct rte_eth_dev *dev);
216 static int eth_igb_get_regs(struct rte_eth_dev *dev,
217 struct rte_dev_reg_info *regs);
218 static int eth_igb_get_eeprom_length(struct rte_eth_dev *dev);
219 static int eth_igb_get_eeprom(struct rte_eth_dev *dev,
220 struct rte_dev_eeprom_info *eeprom);
221 static int eth_igb_set_eeprom(struct rte_eth_dev *dev,
222 struct rte_dev_eeprom_info *eeprom);
223 static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
224 struct ether_addr *mc_addr_set,
225 uint32_t nb_mc_addr);
226 static int igb_timesync_enable(struct rte_eth_dev *dev);
227 static int igb_timesync_disable(struct rte_eth_dev *dev);
228 static int igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
229 struct timespec *timestamp,
231 static int igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
232 struct timespec *timestamp);
233 static int eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev,
235 static int eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev,
237 static void eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
238 uint8_t queue, uint8_t msix_vector);
239 static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
240 uint8_t index, uint8_t offset);
241 static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);
244 * Define VF Stats MACRO for Non "cleared on read" register
246 #define UPDATE_VF_STAT(reg, last, cur) \
248 u32 latest = E1000_READ_REG(hw, reg); \
249 cur += latest - last; \
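/*
 * Added note: the VF statistics registers are not cleared on read, so the
 * macro accumulates the delta between the latest and the previously
 * sampled value. Since the subtraction is done on unsigned 32-bit values,
 * a counter wrap still yields the correct delta, e.g. latest = 5 and
 * last = 0xFFFFFFFE give latest - last = 7.
 */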
254 #define IGB_FC_PAUSE_TIME 0x0680
255 #define IGB_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */
256 #define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */
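/*
 * Added worked example: a link update with wait_to_complete set polls up
 * to IGB_LINK_UPDATE_CHECK_TIMEOUT (90) times, sleeping
 * IGB_LINK_UPDATE_CHECK_INTERVAL (100 ms) between polls, i.e. up to about
 * 9 seconds, which matches the comment above.
 */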
258 #define IGBVF_PMD_NAME "rte_igbvf_pmd" /* PMD name */
260 static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;
263 * The set of PCI devices this driver supports
265 static const struct rte_pci_id pci_id_igb_map[] = {
267 #define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
268 #include "rte_pci_dev_ids.h"
274 * The set of PCI devices this driver supports (for 82576&I350 VF)
276 static const struct rte_pci_id pci_id_igbvf_map[] = {
278 #define RTE_PCI_DEV_ID_DECL_IGBVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
279 #include "rte_pci_dev_ids.h"
284 static const struct eth_dev_ops eth_igb_ops = {
285 .dev_configure = eth_igb_configure,
286 .dev_start = eth_igb_start,
287 .dev_stop = eth_igb_stop,
288 .dev_close = eth_igb_close,
289 .promiscuous_enable = eth_igb_promiscuous_enable,
290 .promiscuous_disable = eth_igb_promiscuous_disable,
291 .allmulticast_enable = eth_igb_allmulticast_enable,
292 .allmulticast_disable = eth_igb_allmulticast_disable,
293 .link_update = eth_igb_link_update,
294 .stats_get = eth_igb_stats_get,
295 .stats_reset = eth_igb_stats_reset,
296 .dev_infos_get = eth_igb_infos_get,
297 .mtu_set = eth_igb_mtu_set,
298 .vlan_filter_set = eth_igb_vlan_filter_set,
299 .vlan_tpid_set = eth_igb_vlan_tpid_set,
300 .vlan_offload_set = eth_igb_vlan_offload_set,
301 .rx_queue_setup = eth_igb_rx_queue_setup,
302 .rx_queue_intr_enable = eth_igb_rx_queue_intr_enable,
303 .rx_queue_intr_disable = eth_igb_rx_queue_intr_disable,
304 .rx_queue_release = eth_igb_rx_queue_release,
305 .rx_queue_count = eth_igb_rx_queue_count,
306 .rx_descriptor_done = eth_igb_rx_descriptor_done,
307 .tx_queue_setup = eth_igb_tx_queue_setup,
308 .tx_queue_release = eth_igb_tx_queue_release,
309 .dev_led_on = eth_igb_led_on,
310 .dev_led_off = eth_igb_led_off,
311 .flow_ctrl_get = eth_igb_flow_ctrl_get,
312 .flow_ctrl_set = eth_igb_flow_ctrl_set,
313 .mac_addr_add = eth_igb_rar_set,
314 .mac_addr_remove = eth_igb_rar_clear,
315 .mac_addr_set = eth_igb_default_mac_addr_set,
316 .reta_update = eth_igb_rss_reta_update,
317 .reta_query = eth_igb_rss_reta_query,
318 .rss_hash_update = eth_igb_rss_hash_update,
319 .rss_hash_conf_get = eth_igb_rss_hash_conf_get,
320 .filter_ctrl = eth_igb_filter_ctrl,
321 .set_mc_addr_list = eth_igb_set_mc_addr_list,
322 .timesync_enable = igb_timesync_enable,
323 .timesync_disable = igb_timesync_disable,
324 .timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
325 .timesync_read_tx_timestamp = igb_timesync_read_tx_timestamp,
326 .get_reg_length = eth_igb_get_reg_length,
327 .get_reg = eth_igb_get_regs,
328 .get_eeprom_length = eth_igb_get_eeprom_length,
329 .get_eeprom = eth_igb_get_eeprom,
330 .set_eeprom = eth_igb_set_eeprom,
334 * dev_ops for virtual function; only the bare necessities for basic VF
335 * operation have been implemented.
337 static const struct eth_dev_ops igbvf_eth_dev_ops = {
338 .dev_configure = igbvf_dev_configure,
339 .dev_start = igbvf_dev_start,
340 .dev_stop = igbvf_dev_stop,
341 .dev_close = igbvf_dev_close,
342 .link_update = eth_igb_link_update,
343 .stats_get = eth_igbvf_stats_get,
344 .stats_reset = eth_igbvf_stats_reset,
345 .vlan_filter_set = igbvf_vlan_filter_set,
346 .dev_infos_get = eth_igbvf_infos_get,
347 .rx_queue_setup = eth_igb_rx_queue_setup,
348 .rx_queue_release = eth_igb_rx_queue_release,
349 .tx_queue_setup = eth_igb_tx_queue_setup,
350 .tx_queue_release = eth_igb_tx_queue_release,
351 .set_mc_addr_list = eth_igb_set_mc_addr_list,
352 .mac_addr_set = igbvf_default_mac_addr_set,
353 .get_reg_length = igbvf_get_reg_length,
354 .get_reg = igbvf_get_regs,
358 * Atomically reads the link status information from global
359 * structure rte_eth_dev.
362 * - Pointer to the structure rte_eth_dev to read from.
363 * - Pointer to the buffer to be saved with the link status.
366 * - On success, zero.
367 * - On failure, negative value.
370 rte_igb_dev_atomic_read_link_status(struct rte_eth_dev *dev,
371 struct rte_eth_link *link)
373 struct rte_eth_link *dst = link;
374 struct rte_eth_link *src = &(dev->data->dev_link);
376 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
377 *(uint64_t *)src) == 0)
384 * Atomically writes the link status information into global
385 * structure rte_eth_dev.
388 * - Pointer to the structure rte_eth_dev to write to.
389 * - Pointer to the buffer holding the link status to be written.
392 * - On success, zero.
393 * - On failure, negative value.
396 rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev,
397 struct rte_eth_link *link)
399 struct rte_eth_link *dst = &(dev->data->dev_link);
400 struct rte_eth_link *src = link;
402 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
403 *(uint64_t *)src) == 0)
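/*
 * Added note: struct rte_eth_link fits in 64 bits, so a single
 * rte_atomic64_cmpset() copies the whole link status at once and readers
 * never observe a half-updated speed/duplex/status combination. The
 * current destination value is used as the expected value, so the cmpset
 * only fails if another writer raced in between, in which case these
 * helpers report failure to the caller.
 */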
410 igb_intr_enable(struct rte_eth_dev *dev)
412 struct e1000_interrupt *intr =
413 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
414 struct e1000_hw *hw =
415 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
417 E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
418 E1000_WRITE_FLUSH(hw);
422 igb_intr_disable(struct e1000_hw *hw)
424 E1000_WRITE_REG(hw, E1000_IMC, ~0);
425 E1000_WRITE_FLUSH(hw);
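/*
 * Added note on the IMS/IMC register pair: setting a bit in E1000_IMS
 * unmasks that interrupt cause, while setting a bit in E1000_IMC masks it.
 * Writing ~0 to IMC, as done above, therefore masks every interrupt source
 * at once; the flush forces the posted write out to the device.
 */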
428 static inline int32_t
429 igb_pf_reset_hw(struct e1000_hw *hw)
434 status = e1000_reset_hw(hw);
436 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
437 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
438 ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
439 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
440 E1000_WRITE_FLUSH(hw);
446 igb_identify_hardware(struct rte_eth_dev *dev)
448 struct e1000_hw *hw =
449 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
451 hw->vendor_id = dev->pci_dev->id.vendor_id;
452 hw->device_id = dev->pci_dev->id.device_id;
453 hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
454 hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
456 e1000_set_mac_type(hw);
458 /* need to check if it is a vf device below */
462 igb_reset_swfw_lock(struct e1000_hw *hw)
467 * Do mac ops initialization manually here, since we will need
468 * some function pointers set by this call.
470 ret_val = e1000_init_mac_params(hw);
475 * SMBI lock should not fail in this early stage. If this is the case,
476 * it is due to an improper exit of the application.
477 * So force the release of the faulty lock.
479 if (e1000_get_hw_semaphore_generic(hw) < 0) {
480 PMD_DRV_LOG(DEBUG, "SMBI lock released");
482 e1000_put_hw_semaphore_generic(hw);
484 if (hw->mac.ops.acquire_swfw_sync != NULL) {
488 * Phy lock should not fail in this early stage. If this is the case,
489 * it is due to an improper exit of the application.
490 * So force the release of the faulty lock.
492 mask = E1000_SWFW_PHY0_SM << hw->bus.func;
493 if (hw->bus.func > E1000_FUNC_1)
495 if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
496 PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
499 hw->mac.ops.release_swfw_sync(hw, mask);
502 * This one is more tricky since it is common to all ports; but
503 * swfw_sync retries last long enough (1s) to be almost sure that if
504 * the lock can not be taken it is due to an improper lock of the semaphore. */
507 mask = E1000_SWFW_EEP_SM;
508 if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
509 PMD_DRV_LOG(DEBUG, "SWFW common locks released");
511 hw->mac.ops.release_swfw_sync(hw, mask);
514 return E1000_SUCCESS;
518 eth_igb_dev_init(struct rte_eth_dev *eth_dev)
521 struct rte_pci_device *pci_dev;
522 struct e1000_hw *hw =
523 E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
524 struct e1000_vfta * shadow_vfta =
525 E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
526 struct e1000_filter_info *filter_info =
527 E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
528 struct e1000_adapter *adapter =
529 E1000_DEV_PRIVATE(eth_dev->data->dev_private);
533 pci_dev = eth_dev->pci_dev;
534 eth_dev->dev_ops = &eth_igb_ops;
535 eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
536 eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
538 /* for secondary processes, we don't initialise any further as primary
539 * has already done this work. Only check we don't need a different RX function. */
541 if (rte_eal_process_type() != RTE_PROC_PRIMARY){
542 if (eth_dev->data->scattered_rx)
543 eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
547 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
549 igb_identify_hardware(eth_dev);
550 if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
555 e1000_get_bus_info(hw);
557 /* Reset any pending lock */
558 if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) {
563 /* Finish initialization */
564 if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
570 hw->phy.autoneg_wait_to_complete = 0;
571 hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
574 if (hw->phy.media_type == e1000_media_type_copper) {
575 hw->phy.mdix = 0; /* AUTO_ALL_MODES */
576 hw->phy.disable_polarity_correction = 0;
577 hw->phy.ms_type = e1000_ms_hw_default;
581 * Start from a known state, this is important in reading the nvm
586 /* Make sure we have a good EEPROM before we read from it */
587 if (e1000_validate_nvm_checksum(hw) < 0) {
589 * Some PCI-E parts fail the first check due to
590 * the link being in sleep state. Call it again;
591 * if it fails a second time, it's a real issue.
593 if (e1000_validate_nvm_checksum(hw) < 0) {
594 PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
600 /* Read the permanent MAC address out of the EEPROM */
601 if (e1000_read_mac_addr(hw) != 0) {
602 PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
607 /* Allocate memory for storing MAC addresses */
608 eth_dev->data->mac_addrs = rte_zmalloc("e1000",
609 ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
610 if (eth_dev->data->mac_addrs == NULL) {
611 PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
612 "store MAC addresses",
613 ETHER_ADDR_LEN * hw->mac.rar_entry_count);
618 /* Copy the permanent MAC address */
619 ether_addr_copy((struct ether_addr *)hw->mac.addr, &eth_dev->data->mac_addrs[0]);
621 /* initialize the vfta */
622 memset(shadow_vfta, 0, sizeof(*shadow_vfta));
624 /* Now initialize the hardware */
625 if (igb_hardware_init(hw) != 0) {
626 PMD_INIT_LOG(ERR, "Hardware initialization failed");
627 rte_free(eth_dev->data->mac_addrs);
628 eth_dev->data->mac_addrs = NULL;
632 hw->mac.get_link_status = 1;
633 adapter->stopped = 0;
635 /* Indicate SOL/IDER usage */
636 if (e1000_check_reset_block(hw) < 0) {
637 PMD_INIT_LOG(ERR, "PHY reset is blocked due to"
641 /* initialize PF if max_vfs not zero */
642 igb_pf_host_init(eth_dev);
644 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
645 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
646 ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
647 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
648 E1000_WRITE_FLUSH(hw);
650 PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
651 eth_dev->data->port_id, pci_dev->id.vendor_id,
652 pci_dev->id.device_id);
654 /* enable support intr */
655 igb_intr_enable(eth_dev);
657 TAILQ_INIT(&filter_info->flex_list);
658 filter_info->flex_mask = 0;
659 TAILQ_INIT(&filter_info->twotuple_list);
660 filter_info->twotuple_mask = 0;
661 TAILQ_INIT(&filter_info->fivetuple_list);
662 filter_info->fivetuple_mask = 0;
667 igb_hw_control_release(hw);
673 eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
675 struct rte_pci_device *pci_dev;
677 struct e1000_adapter *adapter =
678 E1000_DEV_PRIVATE(eth_dev->data->dev_private);
680 PMD_INIT_FUNC_TRACE();
682 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
685 hw = E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
686 pci_dev = eth_dev->pci_dev;
688 if (adapter->stopped == 0)
689 eth_igb_close(eth_dev);
691 eth_dev->dev_ops = NULL;
692 eth_dev->rx_pkt_burst = NULL;
693 eth_dev->tx_pkt_burst = NULL;
695 /* Reset any pending lock */
696 igb_reset_swfw_lock(hw);
698 rte_free(eth_dev->data->mac_addrs);
699 eth_dev->data->mac_addrs = NULL;
701 /* uninitialize PF if max_vfs not zero */
702 igb_pf_host_uninit(eth_dev);
704 /* disable uio intr before callback unregister */
705 rte_intr_disable(&(pci_dev->intr_handle));
706 rte_intr_callback_unregister(&(pci_dev->intr_handle),
707 eth_igb_interrupt_handler, (void *)eth_dev);
713 * Virtual Function device init
716 eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
718 struct rte_pci_device *pci_dev;
719 struct e1000_adapter *adapter =
720 E1000_DEV_PRIVATE(eth_dev->data->dev_private);
721 struct e1000_hw *hw =
722 E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
725 PMD_INIT_FUNC_TRACE();
727 eth_dev->dev_ops = &igbvf_eth_dev_ops;
728 eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
729 eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
731 /* for secondary processes, we don't initialise any further as primary
732 * has already done this work. Only check we don't need a different RX function. */
734 if (rte_eal_process_type() != RTE_PROC_PRIMARY){
735 if (eth_dev->data->scattered_rx)
736 eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
740 pci_dev = eth_dev->pci_dev;
742 hw->device_id = pci_dev->id.device_id;
743 hw->vendor_id = pci_dev->id.vendor_id;
744 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
745 adapter->stopped = 0;
747 /* Initialize the shared code (base driver) */
748 diag = e1000_setup_init_funcs(hw, TRUE);
750 PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d",
755 /* init_mailbox_params */
756 hw->mbx.ops.init_params(hw);
758 /* Disable the interrupts for VF */
759 igbvf_intr_disable(hw);
761 diag = hw->mac.ops.reset_hw(hw);
763 /* Allocate memory for storing MAC addresses */
764 eth_dev->data->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN *
765 hw->mac.rar_entry_count, 0);
766 if (eth_dev->data->mac_addrs == NULL) {
768 "Failed to allocate %d bytes needed to store MAC "
770 ETHER_ADDR_LEN * hw->mac.rar_entry_count);
774 /* Copy the permanent MAC address */
775 ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
776 &eth_dev->data->mac_addrs[0]);
778 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x "
780 eth_dev->data->port_id, pci_dev->id.vendor_id,
781 pci_dev->id.device_id, "igb_mac_82576_vf");
787 eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
789 struct e1000_adapter *adapter =
790 E1000_DEV_PRIVATE(eth_dev->data->dev_private);
792 PMD_INIT_FUNC_TRACE();
794 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
797 if (adapter->stopped == 0)
798 igbvf_dev_close(eth_dev);
800 eth_dev->dev_ops = NULL;
801 eth_dev->rx_pkt_burst = NULL;
802 eth_dev->tx_pkt_burst = NULL;
804 rte_free(eth_dev->data->mac_addrs);
805 eth_dev->data->mac_addrs = NULL;
810 static struct eth_driver rte_igb_pmd = {
812 .name = "rte_igb_pmd",
813 .id_table = pci_id_igb_map,
814 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
815 RTE_PCI_DRV_DETACHABLE,
817 .eth_dev_init = eth_igb_dev_init,
818 .eth_dev_uninit = eth_igb_dev_uninit,
819 .dev_private_size = sizeof(struct e1000_adapter),
823 * virtual function driver struct
825 static struct eth_driver rte_igbvf_pmd = {
827 .name = "rte_igbvf_pmd",
828 .id_table = pci_id_igbvf_map,
829 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
831 .eth_dev_init = eth_igbvf_dev_init,
832 .eth_dev_uninit = eth_igbvf_dev_uninit,
833 .dev_private_size = sizeof(struct e1000_adapter),
837 rte_igb_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
839 rte_eth_driver_register(&rte_igb_pmd);
844 igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
846 struct e1000_hw *hw =
847 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
848 /* RCTL: enable VLAN filter since VMDq always use VLAN filter */
849 uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
850 rctl |= E1000_RCTL_VFE;
851 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
855 * VF Driver initialization routine.
856 * Invoked once at EAL init time.
857 * Registers itself as the [Virtual Poll Mode] Driver of PCI IGB devices.
860 rte_igbvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
862 PMD_INIT_FUNC_TRACE();
864 rte_eth_driver_register(&rte_igbvf_pmd);
869 eth_igb_configure(struct rte_eth_dev *dev)
871 struct e1000_interrupt *intr =
872 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
874 PMD_INIT_FUNC_TRACE();
875 intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
876 PMD_INIT_FUNC_TRACE();
882 eth_igb_start(struct rte_eth_dev *dev)
884 struct e1000_hw *hw =
885 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
886 struct e1000_adapter *adapter =
887 E1000_DEV_PRIVATE(dev->data->dev_private);
888 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
890 uint32_t intr_vector = 0;
893 PMD_INIT_FUNC_TRACE();
895 /* Power up the phy. Needed to make the link go Up */
896 e1000_power_up_phy(hw);
899 * Packet Buffer Allocation (PBA)
900 * Writing PBA sets the receive portion of the buffer;
901 * the remainder is used for the transmit buffer.
903 if (hw->mac.type == e1000_82575) {
906 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
907 E1000_WRITE_REG(hw, E1000_PBA, pba);
910 /* Put the address into the Receive Address Array */
911 e1000_rar_set(hw, hw->mac.addr, 0);
913 /* Initialize the hardware */
914 if (igb_hardware_init(hw)) {
915 PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
918 adapter->stopped = 0;
920 E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
922 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
923 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
924 ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
925 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
926 E1000_WRITE_FLUSH(hw);
928 /* configure PF module if SRIOV enabled */
929 igb_pf_host_configure(dev);
931 /* check and configure queue intr-vector mapping */
932 if (dev->data->dev_conf.intr_conf.rxq != 0)
933 intr_vector = dev->data->nb_rx_queues;
935 if (rte_intr_efd_enable(intr_handle, intr_vector))
938 if (rte_intr_dp_is_en(intr_handle)) {
939 intr_handle->intr_vec =
940 rte_zmalloc("intr_vec",
941 dev->data->nb_rx_queues * sizeof(int), 0);
942 if (intr_handle->intr_vec == NULL) {
943 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
944 " intr_vec\n", dev->data->nb_rx_queues);
949 /* configure MSI-X for Rx interrupt */
950 eth_igb_configure_msix_intr(dev);
952 /* Configure for OS presence */
953 igb_init_manageability(hw);
955 eth_igb_tx_init(dev);
957 /* This can fail when allocating mbufs for descriptor rings */
958 ret = eth_igb_rx_init(dev);
960 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
961 igb_dev_clear_queues(dev);
965 e1000_clear_hw_cntrs_base_generic(hw);
968 * VLAN Offload Settings
970 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
971 ETH_VLAN_EXTEND_MASK;
972 eth_igb_vlan_offload_set(dev, mask);
974 if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
975 /* Enable VLAN filter since VMDq always use VLAN filter */
976 igb_vmdq_vlan_hw_filter_enable(dev);
979 if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
980 (hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210) ||
981 (hw->mac.type == e1000_i211)) {
982 /* Configure EITR with the maximum possible value (0xFFFF) */
983 E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
986 /* Setup link speed and duplex */
987 switch (dev->data->dev_conf.link_speed) {
988 case ETH_LINK_SPEED_AUTONEG:
989 if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
990 hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
991 else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
992 hw->phy.autoneg_advertised = E1000_ALL_HALF_DUPLEX;
993 else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
994 hw->phy.autoneg_advertised = E1000_ALL_FULL_DUPLEX;
996 goto error_invalid_config;
998 case ETH_LINK_SPEED_10:
999 if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
1000 hw->phy.autoneg_advertised = E1000_ALL_10_SPEED;
1001 else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
1002 hw->phy.autoneg_advertised = ADVERTISE_10_HALF;
1003 else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
1004 hw->phy.autoneg_advertised = ADVERTISE_10_FULL;
1006 goto error_invalid_config;
1008 case ETH_LINK_SPEED_100:
1009 if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
1010 hw->phy.autoneg_advertised = E1000_ALL_100_SPEED;
1011 else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
1012 hw->phy.autoneg_advertised = ADVERTISE_100_HALF;
1013 else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
1014 hw->phy.autoneg_advertised = ADVERTISE_100_FULL;
1016 goto error_invalid_config;
1018 case ETH_LINK_SPEED_1000:
1019 if ((dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX) ||
1020 (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX))
1021 hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
1023 goto error_invalid_config;
1025 case ETH_LINK_SPEED_10000:
1027 goto error_invalid_config;
1029 e1000_setup_link(hw);
1031 /* check if lsc interrupt feature is enabled */
1032 if (dev->data->dev_conf.intr_conf.lsc != 0) {
1033 if (rte_intr_allow_others(intr_handle)) {
1034 rte_intr_callback_register(intr_handle,
1035 eth_igb_interrupt_handler,
1037 eth_igb_lsc_interrupt_setup(dev);
1039 PMD_INIT_LOG(INFO, "lsc won't enable because of"
1040 " no intr multiplex\n");
1043 /* check if rxq interrupt is enabled */
1044 if (dev->data->dev_conf.intr_conf.rxq != 0)
1045 eth_igb_rxq_interrupt_setup(dev);
1047 /* enable uio/vfio intr/eventfd mapping */
1048 rte_intr_enable(intr_handle);
1050 /* resume enabled intr since hw reset */
1051 igb_intr_enable(dev);
1053 PMD_INIT_LOG(DEBUG, "<<");
1057 error_invalid_config:
1058 PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u",
1059 dev->data->dev_conf.link_speed,
1060 dev->data->dev_conf.link_duplex, dev->data->port_id);
1061 igb_dev_clear_queues(dev);
1065 /*********************************************************************
1067 * This routine disables all traffic on the adapter by issuing a
1068 * global reset on the MAC.
1070 **********************************************************************/
1072 eth_igb_stop(struct rte_eth_dev *dev)
1074 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1075 struct e1000_filter_info *filter_info =
1076 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1077 struct rte_eth_link link;
1078 struct e1000_flex_filter *p_flex;
1079 struct e1000_5tuple_filter *p_5tuple, *p_5tuple_next;
1080 struct e1000_2tuple_filter *p_2tuple, *p_2tuple_next;
1081 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1083 igb_intr_disable(hw);
1085 /* disable intr eventfd mapping */
1086 rte_intr_disable(intr_handle);
1088 igb_pf_reset_hw(hw);
1089 E1000_WRITE_REG(hw, E1000_WUC, 0);
1091 /* Set bit for Go Link disconnect */
1092 if (hw->mac.type >= e1000_82580) {
1095 phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
1096 phpm_reg |= E1000_82580_PM_GO_LINKD;
1097 E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
1100 /* Power down the phy. Needed to make the link go Down */
1101 if (hw->phy.media_type == e1000_media_type_copper)
1102 e1000_power_down_phy(hw);
1104 e1000_shutdown_fiber_serdes_link(hw);
1106 igb_dev_clear_queues(dev);
1108 /* clear the recorded link status */
1109 memset(&link, 0, sizeof(link));
1110 rte_igb_dev_atomic_write_link_status(dev, &link);
1112 /* Remove all flex filters of the device */
1113 while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) {
1114 TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries);
1117 filter_info->flex_mask = 0;
1119 /* Remove all ntuple filters of the device */
1120 for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
1121 p_5tuple != NULL; p_5tuple = p_5tuple_next) {
1122 p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
1123 TAILQ_REMOVE(&filter_info->fivetuple_list,
1127 filter_info->fivetuple_mask = 0;
1128 for (p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list);
1129 p_2tuple != NULL; p_2tuple = p_2tuple_next) {
1130 p_2tuple_next = TAILQ_NEXT(p_2tuple, entries);
1131 TAILQ_REMOVE(&filter_info->twotuple_list,
1135 filter_info->twotuple_mask = 0;
1137 /* Clean datapath event and queue/vec mapping */
1138 rte_intr_efd_disable(intr_handle);
1139 if (intr_handle->intr_vec != NULL) {
1140 rte_free(intr_handle->intr_vec);
1141 intr_handle->intr_vec = NULL;
1146 eth_igb_close(struct rte_eth_dev *dev)
1148 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1149 struct e1000_adapter *adapter =
1150 E1000_DEV_PRIVATE(dev->data->dev_private);
1151 struct rte_eth_link link;
1152 struct rte_pci_device *pci_dev;
1155 adapter->stopped = 1;
1157 e1000_phy_hw_reset(hw);
1158 igb_release_manageability(hw);
1159 igb_hw_control_release(hw);
1161 /* Clear bit for Go Link disconnect */
1162 if (hw->mac.type >= e1000_82580) {
1165 phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
1166 phpm_reg &= ~E1000_82580_PM_GO_LINKD;
1167 E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
1170 igb_dev_free_queues(dev);
1172 pci_dev = dev->pci_dev;
1173 if (pci_dev->intr_handle.intr_vec) {
1174 rte_free(pci_dev->intr_handle.intr_vec);
1175 pci_dev->intr_handle.intr_vec = NULL;
1178 memset(&link, 0, sizeof(link));
1179 rte_igb_dev_atomic_write_link_status(dev, &link);
1183 igb_get_rx_buffer_size(struct e1000_hw *hw)
1185 uint32_t rx_buf_size;
1186 if (hw->mac.type == e1000_82576) {
1187 rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10;
1188 } else if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i350) {
1189 /* PBS needs to be translated according to a lookup table */
1190 rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf);
1191 rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size);
1192 rx_buf_size = (rx_buf_size << 10);
1193 } else if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
1194 rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10;
1196 rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10;
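/*
 * Added note: RXPBS/PBA report the Rx packet buffer size in kilobytes, so
 * the "<< 10" above converts it to bytes; a raw value of 64 becomes
 * 64 KB = 65536 bytes. The result is used below to derive the
 * flow-control watermarks.
 */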
1202 /*********************************************************************
1204 * Initialize the hardware
1206 **********************************************************************/
1208 igb_hardware_init(struct e1000_hw *hw)
1210 uint32_t rx_buf_size;
1213 /* Let the firmware know the OS is in control */
1214 igb_hw_control_acquire(hw);
1217 * These parameters control the automatic generation (Tx) and
1218 * response (Rx) to Ethernet PAUSE frames.
1219 * - High water mark should allow for at least two standard size (1518)
1220 * frames to be received after sending an XOFF.
1221 * - Low water mark works best when it is very near the high water mark.
1222 * This allows the receiver to restart by sending XON when it has
1223 * drained a bit. Here we use an arbitrary value of 1500 which will
1224 * restart after one full frame is pulled from the buffer. There
1225 * could be several smaller frames in the buffer and if so they will
1226 * not trigger the XON until their total number reduces the buffer
1228 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
1230 rx_buf_size = igb_get_rx_buffer_size(hw);
1232 hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2);
1233 hw->fc.low_water = hw->fc.high_water - 1500;
1234 hw->fc.pause_time = IGB_FC_PAUSE_TIME;
1235 hw->fc.send_xon = 1;
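/*
 * Added worked example: with a 64 KB Rx buffer, high_water becomes
 * 65536 - 2 * 1518 = 62500 bytes and low_water 61000 bytes, so XOFF is
 * sent once less than two full-size frames of headroom remain and XON is
 * sent again after roughly one full frame has been drained, as described
 * in the comment above.
 */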
1237 /* Set Flow control, use the tunable location if sane */
1238 if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4))
1239 hw->fc.requested_mode = igb_fc_setting;
1241 hw->fc.requested_mode = e1000_fc_none;
1243 /* Issue a global reset */
1244 igb_pf_reset_hw(hw);
1245 E1000_WRITE_REG(hw, E1000_WUC, 0);
1247 diag = e1000_init_hw(hw);
1251 E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
1252 e1000_get_phy_info(hw);
1253 e1000_check_for_link(hw);
1258 /* This function is based on igb_update_stats_counters() in igb/if_igb.c */
1260 eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
1262 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1263 struct e1000_hw_stats *stats =
1264 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1267 if(hw->phy.media_type == e1000_media_type_copper ||
1268 (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
1270 E1000_READ_REG(hw,E1000_SYMERRS);
1271 stats->sec += E1000_READ_REG(hw, E1000_SEC);
1274 stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
1275 stats->mpc += E1000_READ_REG(hw, E1000_MPC);
1276 stats->scc += E1000_READ_REG(hw, E1000_SCC);
1277 stats->ecol += E1000_READ_REG(hw, E1000_ECOL);
1279 stats->mcc += E1000_READ_REG(hw, E1000_MCC);
1280 stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
1281 stats->colc += E1000_READ_REG(hw, E1000_COLC);
1282 stats->dc += E1000_READ_REG(hw, E1000_DC);
1283 stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
1284 stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
1285 stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
1287 ** For watchdog management we need to know if we have been
1288 ** paused during the last interval, so capture that here.
1290 pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
1291 stats->xoffrxc += pause_frames;
1292 stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
1293 stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
1294 stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
1295 stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
1296 stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
1297 stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
1298 stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
1299 stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
1300 stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
1301 stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
1302 stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
1303 stats->gptc += E1000_READ_REG(hw, E1000_GPTC);
1305 /* For the 64-bit byte counters the low dword must be read first. */
1306 /* Both registers clear on the read of the high dword */
1308 stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
1309 stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
1310 stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
1311 stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);
1313 stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
1314 stats->ruc += E1000_READ_REG(hw, E1000_RUC);
1315 stats->rfc += E1000_READ_REG(hw, E1000_RFC);
1316 stats->roc += E1000_READ_REG(hw, E1000_ROC);
1317 stats->rjc += E1000_READ_REG(hw, E1000_RJC);
1319 stats->tor += E1000_READ_REG(hw, E1000_TORL);
1320 stats->tor += ((uint64_t)E1000_READ_REG(hw, E1000_TORH) << 32);
1321 stats->tot += E1000_READ_REG(hw, E1000_TOTL);
1322 stats->tot += ((uint64_t)E1000_READ_REG(hw, E1000_TOTH) << 32);
1324 stats->tpr += E1000_READ_REG(hw, E1000_TPR);
1325 stats->tpt += E1000_READ_REG(hw, E1000_TPT);
1326 stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
1327 stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
1328 stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
1329 stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
1330 stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
1331 stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
1332 stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
1333 stats->bptc += E1000_READ_REG(hw, E1000_BPTC);
1335 /* Interrupt Counts */
1337 stats->iac += E1000_READ_REG(hw, E1000_IAC);
1338 stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
1339 stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
1340 stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
1341 stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
1342 stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
1343 stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
1344 stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
1345 stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
1347 /* Host to Card Statistics */
1349 stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
1350 stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
1351 stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
1352 stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
1353 stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
1354 stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
1355 stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
1356 stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL);
1357 stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32);
1358 stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL);
1359 stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32);
1360 stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
1361 stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
1362 stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);
1364 stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
1365 stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
1366 stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
1367 stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
1368 stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
1369 stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
1371 if (rte_stats == NULL)
1375 rte_stats->ibadcrc = stats->crcerrs;
1376 rte_stats->ibadlen = stats->rlec + stats->ruc + stats->roc;
1377 rte_stats->imissed = stats->mpc;
1378 rte_stats->ierrors = rte_stats->ibadcrc +
1379 rte_stats->ibadlen +
1380 rte_stats->imissed +
1381 stats->rxerrc + stats->algnerrc + stats->cexterr;
1384 rte_stats->oerrors = stats->ecol + stats->latecol;
1386 /* XON/XOFF pause frames */
1387 rte_stats->tx_pause_xon = stats->xontxc;
1388 rte_stats->rx_pause_xon = stats->xonrxc;
1389 rte_stats->tx_pause_xoff = stats->xofftxc;
1390 rte_stats->rx_pause_xoff = stats->xoffrxc;
1392 rte_stats->ipackets = stats->gprc;
1393 rte_stats->opackets = stats->gptc;
1394 rte_stats->ibytes = stats->gorc;
1395 rte_stats->obytes = stats->gotc;
1399 eth_igb_stats_reset(struct rte_eth_dev *dev)
1401 struct e1000_hw_stats *hw_stats =
1402 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1404 /* HW registers are cleared on read */
1405 eth_igb_stats_get(dev, NULL);
1407 /* Reset software totals */
1408 memset(hw_stats, 0, sizeof(*hw_stats));
1412 eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
1414 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1415 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*)
1416 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1418 /* Good Rx packets, include VF loopback */
1419 UPDATE_VF_STAT(E1000_VFGPRC,
1420 hw_stats->last_gprc, hw_stats->gprc);
1422 /* Good Rx octets, include VF loopback */
1423 UPDATE_VF_STAT(E1000_VFGORC,
1424 hw_stats->last_gorc, hw_stats->gorc);
1426 /* Good Tx packets, include VF loopback */
1427 UPDATE_VF_STAT(E1000_VFGPTC,
1428 hw_stats->last_gptc, hw_stats->gptc);
1430 /* Good Tx octets, include VF loopback */
1431 UPDATE_VF_STAT(E1000_VFGOTC,
1432 hw_stats->last_gotc, hw_stats->gotc);
1434 /* Rx Multicast packets */
1435 UPDATE_VF_STAT(E1000_VFMPRC,
1436 hw_stats->last_mprc, hw_stats->mprc);
1438 /* Good Rx loopback packets */
1439 UPDATE_VF_STAT(E1000_VFGPRLBC,
1440 hw_stats->last_gprlbc, hw_stats->gprlbc);
1442 /* Good Rx loopback octets */
1443 UPDATE_VF_STAT(E1000_VFGORLBC,
1444 hw_stats->last_gorlbc, hw_stats->gorlbc);
1446 /* Good Tx loopback packets */
1447 UPDATE_VF_STAT(E1000_VFGPTLBC,
1448 hw_stats->last_gptlbc, hw_stats->gptlbc);
1450 /* Good Tx loopback octets */
1451 UPDATE_VF_STAT(E1000_VFGOTLBC,
1452 hw_stats->last_gotlbc, hw_stats->gotlbc);
1454 if (rte_stats == NULL)
1457 rte_stats->ipackets = hw_stats->gprc;
1458 rte_stats->ibytes = hw_stats->gorc;
1459 rte_stats->opackets = hw_stats->gptc;
1460 rte_stats->obytes = hw_stats->gotc;
1461 rte_stats->imcasts = hw_stats->mprc;
1462 rte_stats->ilbpackets = hw_stats->gprlbc;
1463 rte_stats->ilbbytes = hw_stats->gorlbc;
1464 rte_stats->olbpackets = hw_stats->gptlbc;
1465 rte_stats->olbbytes = hw_stats->gotlbc;
1470 eth_igbvf_stats_reset(struct rte_eth_dev *dev)
1472 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*)
1473 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1475 /* Sync HW register to the last stats */
1476 eth_igbvf_stats_get(dev, NULL);
1478 /* reset HW current stats*/
1479 memset(&hw_stats->gprc, 0, sizeof(*hw_stats) -
1480 offsetof(struct e1000_vf_stats, gprc));
1485 eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1487 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1489 dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
1490 dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
1491 dev_info->max_mac_addrs = hw->mac.rar_entry_count;
1492 dev_info->rx_offload_capa =
1493 DEV_RX_OFFLOAD_VLAN_STRIP |
1494 DEV_RX_OFFLOAD_IPV4_CKSUM |
1495 DEV_RX_OFFLOAD_UDP_CKSUM |
1496 DEV_RX_OFFLOAD_TCP_CKSUM;
1497 dev_info->tx_offload_capa =
1498 DEV_TX_OFFLOAD_VLAN_INSERT |
1499 DEV_TX_OFFLOAD_IPV4_CKSUM |
1500 DEV_TX_OFFLOAD_UDP_CKSUM |
1501 DEV_TX_OFFLOAD_TCP_CKSUM |
1502 DEV_TX_OFFLOAD_SCTP_CKSUM |
1503 DEV_TX_OFFLOAD_TCP_TSO;
1505 switch (hw->mac.type) {
1507 dev_info->max_rx_queues = 4;
1508 dev_info->max_tx_queues = 4;
1509 dev_info->max_vmdq_pools = 0;
1513 dev_info->max_rx_queues = 16;
1514 dev_info->max_tx_queues = 16;
1515 dev_info->max_vmdq_pools = ETH_8_POOLS;
1516 dev_info->vmdq_queue_num = 16;
1520 dev_info->max_rx_queues = 8;
1521 dev_info->max_tx_queues = 8;
1522 dev_info->max_vmdq_pools = ETH_8_POOLS;
1523 dev_info->vmdq_queue_num = 8;
1527 dev_info->max_rx_queues = 8;
1528 dev_info->max_tx_queues = 8;
1529 dev_info->max_vmdq_pools = ETH_8_POOLS;
1530 dev_info->vmdq_queue_num = 8;
1534 dev_info->max_rx_queues = 8;
1535 dev_info->max_tx_queues = 8;
1539 dev_info->max_rx_queues = 4;
1540 dev_info->max_tx_queues = 4;
1541 dev_info->max_vmdq_pools = 0;
1545 dev_info->max_rx_queues = 2;
1546 dev_info->max_tx_queues = 2;
1547 dev_info->max_vmdq_pools = 0;
1551 /* Should not happen */
1554 dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t);
1555 dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
1556 dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL;
1558 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1560 .pthresh = IGB_DEFAULT_RX_PTHRESH,
1561 .hthresh = IGB_DEFAULT_RX_HTHRESH,
1562 .wthresh = IGB_DEFAULT_RX_WTHRESH,
1564 .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
1568 dev_info->default_txconf = (struct rte_eth_txconf) {
1570 .pthresh = IGB_DEFAULT_TX_PTHRESH,
1571 .hthresh = IGB_DEFAULT_TX_HTHRESH,
1572 .wthresh = IGB_DEFAULT_TX_WTHRESH,
1579 eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1581 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1583 dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
1584 dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
1585 dev_info->max_mac_addrs = hw->mac.rar_entry_count;
1586 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
1587 DEV_RX_OFFLOAD_IPV4_CKSUM |
1588 DEV_RX_OFFLOAD_UDP_CKSUM |
1589 DEV_RX_OFFLOAD_TCP_CKSUM;
1590 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
1591 DEV_TX_OFFLOAD_IPV4_CKSUM |
1592 DEV_TX_OFFLOAD_UDP_CKSUM |
1593 DEV_TX_OFFLOAD_TCP_CKSUM |
1594 DEV_TX_OFFLOAD_SCTP_CKSUM |
1595 DEV_TX_OFFLOAD_TCP_TSO;
1596 switch (hw->mac.type) {
1598 dev_info->max_rx_queues = 2;
1599 dev_info->max_tx_queues = 2;
1601 case e1000_vfadapt_i350:
1602 dev_info->max_rx_queues = 1;
1603 dev_info->max_tx_queues = 1;
1606 /* Should not happen */
1610 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1612 .pthresh = IGB_DEFAULT_RX_PTHRESH,
1613 .hthresh = IGB_DEFAULT_RX_HTHRESH,
1614 .wthresh = IGB_DEFAULT_RX_WTHRESH,
1616 .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
1620 dev_info->default_txconf = (struct rte_eth_txconf) {
1622 .pthresh = IGB_DEFAULT_TX_PTHRESH,
1623 .hthresh = IGB_DEFAULT_TX_HTHRESH,
1624 .wthresh = IGB_DEFAULT_TX_WTHRESH,
1630 /* return 0 means link status changed, -1 means not changed */
1632 eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1634 struct e1000_hw *hw =
1635 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1636 struct rte_eth_link link, old;
1637 int link_check, count;
1640 hw->mac.get_link_status = 1;
1642 /* possible wait-to-complete in up to 9 seconds */
1643 for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count ++) {
1644 /* Read the real link status */
1645 switch (hw->phy.media_type) {
1646 case e1000_media_type_copper:
1647 /* Do the work to read phy */
1648 e1000_check_for_link(hw);
1649 link_check = !hw->mac.get_link_status;
1652 case e1000_media_type_fiber:
1653 e1000_check_for_link(hw);
1654 link_check = (E1000_READ_REG(hw, E1000_STATUS) &
1658 case e1000_media_type_internal_serdes:
1659 e1000_check_for_link(hw);
1660 link_check = hw->mac.serdes_has_link;
1663 /* VF device is type_unknown */
1664 case e1000_media_type_unknown:
1665 eth_igbvf_link_update(hw);
1666 link_check = !hw->mac.get_link_status;
1672 if (link_check || wait_to_complete == 0)
1674 rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL);
1676 memset(&link, 0, sizeof(link));
1677 rte_igb_dev_atomic_read_link_status(dev, &link);
1680 /* Now we check if a transition has happened */
1682 hw->mac.ops.get_link_up_info(hw, &link.link_speed,
1684 link.link_status = 1;
1685 } else if (!link_check) {
1686 link.link_speed = 0;
1687 link.link_duplex = 0;
1688 link.link_status = 0;
1690 rte_igb_dev_atomic_write_link_status(dev, &link);
1693 if (old.link_status == link.link_status)
1701 * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
1702 * For ASF and Pass Through versions of f/w this means
1703 * that the driver is loaded.
1706 igb_hw_control_acquire(struct e1000_hw *hw)
1710 /* Let firmware know the driver has taken over */
1711 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1712 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1716 * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
1717 * For ASF and Pass Through versions of f/w this means that the
1718 * driver is no longer loaded.
1721 igb_hw_control_release(struct e1000_hw *hw)
1725 /* Let firmware take over control of h/w */
1726 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1727 E1000_WRITE_REG(hw, E1000_CTRL_EXT,
1728 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1732 * Bit of a misnomer, what this really means is
1733 * to enable OS management of the system... aka
1734 * to disable special hardware management features.
1737 igb_init_manageability(struct e1000_hw *hw)
1739 if (e1000_enable_mng_pass_thru(hw)) {
1740 uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
1741 uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
1743 /* disable hardware interception of ARP */
1744 manc &= ~(E1000_MANC_ARP_EN);
1746 /* enable receiving management packets to the host */
1747 manc |= E1000_MANC_EN_MNG2HOST;
1748 manc2h |= 1 << 5; /* Mng Port 623 */
1749 manc2h |= 1 << 6; /* Mng Port 664 */
1750 E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
1751 E1000_WRITE_REG(hw, E1000_MANC, manc);
1756 igb_release_manageability(struct e1000_hw *hw)
1758 if (e1000_enable_mng_pass_thru(hw)) {
1759 uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
1761 manc |= E1000_MANC_ARP_EN;
1762 manc &= ~E1000_MANC_EN_MNG2HOST;
1764 E1000_WRITE_REG(hw, E1000_MANC, manc);
1769 eth_igb_promiscuous_enable(struct rte_eth_dev *dev)
1771 struct e1000_hw *hw =
1772 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1775 rctl = E1000_READ_REG(hw, E1000_RCTL);
1776 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1777 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1781 eth_igb_promiscuous_disable(struct rte_eth_dev *dev)
1783 struct e1000_hw *hw =
1784 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1787 rctl = E1000_READ_REG(hw, E1000_RCTL);
1788 rctl &= (~E1000_RCTL_UPE);
1789 if (dev->data->all_multicast == 1)
1790 rctl |= E1000_RCTL_MPE;
1792 rctl &= (~E1000_RCTL_MPE);
1793 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1797 eth_igb_allmulticast_enable(struct rte_eth_dev *dev)
1799 struct e1000_hw *hw =
1800 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1803 rctl = E1000_READ_REG(hw, E1000_RCTL);
1804 rctl |= E1000_RCTL_MPE;
1805 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1809 eth_igb_allmulticast_disable(struct rte_eth_dev *dev)
1811 struct e1000_hw *hw =
1812 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1815 if (dev->data->promiscuous == 1)
1816 return; /* must remain in all_multicast mode */
1817 rctl = E1000_READ_REG(hw, E1000_RCTL);
1818 rctl &= (~E1000_RCTL_MPE);
1819 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1823 eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1825 struct e1000_hw *hw =
1826 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1827 struct e1000_vfta * shadow_vfta =
1828 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1833 vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
1834 E1000_VFTA_ENTRY_MASK);
1835 vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
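/*
 * Added example, assuming the usual 32-bit VFTA entries
 * (E1000_VFTA_ENTRY_SHIFT == 5, E1000_VFTA_ENTRY_BIT_SHIFT_MASK == 0x1F):
 * vlan_id 100 maps to vid_idx 3 and vid_bit (1 << 4), i.e. bit 4 of the
 * fourth 32-bit VFTA register.
 */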
1836 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
1841 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);
1843 /* update local VFTA copy */
1844 shadow_vfta->vfta[vid_idx] = vfta;
1850 eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid)
1852 struct e1000_hw *hw =
1853 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1854 uint32_t reg = ETHER_TYPE_VLAN;
1856 reg |= (tpid << 16);
1857 E1000_WRITE_REG(hw, E1000_VET, reg);
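/*
 * Added note: the VET register keeps the standard TPID (ETHER_TYPE_VLAN,
 * 0x8100) in its low 16 bits, while the caller-supplied TPID is placed in
 * the upper 16 bits which, assuming the usual extended-VLAN usage of VET,
 * is the ether type matched for the outer tag when QinQ is enabled.
 */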
1861 igb_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1863 struct e1000_hw *hw =
1864 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1867 /* Filter Table Disable */
1868 reg = E1000_READ_REG(hw, E1000_RCTL);
1869 reg &= ~E1000_RCTL_CFIEN;
1870 reg &= ~E1000_RCTL_VFE;
1871 E1000_WRITE_REG(hw, E1000_RCTL, reg);
1875 igb_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1877 struct e1000_hw *hw =
1878 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1879 struct e1000_vfta * shadow_vfta =
1880 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1884 /* Filter Table Enable, CFI not used for packet acceptance */
1885 reg = E1000_READ_REG(hw, E1000_RCTL);
1886 reg &= ~E1000_RCTL_CFIEN;
1887 reg |= E1000_RCTL_VFE;
1888 E1000_WRITE_REG(hw, E1000_RCTL, reg);
1890 /* restore VFTA table */
1891 for (i = 0; i < IGB_VFTA_SIZE; i++)
1892 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
1896 igb_vlan_hw_strip_disable(struct rte_eth_dev *dev)
1898 struct e1000_hw *hw =
1899 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1902 /* VLAN Mode Disable */
1903 reg = E1000_READ_REG(hw, E1000_CTRL);
1904 reg &= ~E1000_CTRL_VME;
1905 E1000_WRITE_REG(hw, E1000_CTRL, reg);
1909 igb_vlan_hw_strip_enable(struct rte_eth_dev *dev)
1911 struct e1000_hw *hw =
1912 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1915 /* VLAN Mode Enable */
1916 reg = E1000_READ_REG(hw, E1000_CTRL);
1917 reg |= E1000_CTRL_VME;
1918 E1000_WRITE_REG(hw, E1000_CTRL, reg);
1922 igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
1924 struct e1000_hw *hw =
1925 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1928 /* CTRL_EXT: Extended VLAN */
1929 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1930 reg &= ~E1000_CTRL_EXT_EXTEND_VLAN;
1931 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
1933 /* Update maximum packet length */
1934 if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
1935 E1000_WRITE_REG(hw, E1000_RLPML,
1936 dev->data->dev_conf.rxmode.max_rx_pkt_len +
1941 igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
1943 struct e1000_hw *hw =
1944 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1947 /* CTRL_EXT: Extended VLAN */
1948 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1949 reg |= E1000_CTRL_EXT_EXTEND_VLAN;
1950 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
1952 /* Update maximum packet length */
1953 if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
1954 E1000_WRITE_REG(hw, E1000_RLPML,
1955 dev->data->dev_conf.rxmode.max_rx_pkt_len +
1960 eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1962 if(mask & ETH_VLAN_STRIP_MASK){
1963 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1964 igb_vlan_hw_strip_enable(dev);
1966 igb_vlan_hw_strip_disable(dev);
1969 if(mask & ETH_VLAN_FILTER_MASK){
1970 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1971 igb_vlan_hw_filter_enable(dev);
1973 igb_vlan_hw_filter_disable(dev);
1976 if(mask & ETH_VLAN_EXTEND_MASK){
1977 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1978 igb_vlan_hw_extend_enable(dev);
1980 igb_vlan_hw_extend_disable(dev);
1986 * It enables the interrupt mask and then enables the interrupt.
1989 * Pointer to struct rte_eth_dev.
1992 * - On success, zero.
1993 * - On failure, a negative value.
1996 eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
1998 struct e1000_interrupt *intr =
1999 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2001 intr->mask |= E1000_ICR_LSC;
2006 /* It clears the interrupt causes and enables the interrupt.
2007 * It will be called only once, during NIC initialization.
2010 * Pointer to struct rte_eth_dev.
2013 * - On success, zero.
2014 * - On failure, a negative value.
2016 static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev)
2018 uint32_t mask, regval;
2019 struct e1000_hw *hw =
2020 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2021 struct rte_eth_dev_info dev_info;
2023 memset(&dev_info, 0, sizeof(dev_info));
2024 eth_igb_infos_get(dev, &dev_info);
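	/*
	 * Build a mask with one bit set per reported RX queue so that each
	 * queue's interrupt cause is unmasked in the extended interrupt
	 * mask register (EIMS) below.
	 */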
2026 mask = 0xFFFFFFFF >> (32 - dev_info.max_rx_queues);
2027 regval = E1000_READ_REG(hw, E1000_EIMS);
2028 E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
2034 * It reads ICR to get the interrupt causes, checks them, and sets a bit flag
2035 * to update the link status.
2038 * Pointer to struct rte_eth_dev.
2041 * - On success, zero.
2042 * - On failure, a negative value.
2045 eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
2048 struct e1000_hw *hw =
2049 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2050 struct e1000_interrupt *intr =
2051 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2053 igb_intr_disable(hw);
2055 /* read-on-clear nic registers here */
2056 icr = E1000_READ_REG(hw, E1000_ICR);
2059 if (icr & E1000_ICR_LSC) {
2060 intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
2063 if (icr & E1000_ICR_VMMB)
2064 intr->flags |= E1000_FLAG_MAILBOX;
2070 * It executes link_update after knowing an interrupt is present.
2073 * Pointer to struct rte_eth_dev.
2076 * - On success, zero.
2077 * - On failure, a negative value.
2080 eth_igb_interrupt_action(struct rte_eth_dev *dev)
2082 struct e1000_hw *hw =
2083 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2084 struct e1000_interrupt *intr =
2085 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2086 uint32_t tctl, rctl;
2087 struct rte_eth_link link;
2090 if (intr->flags & E1000_FLAG_MAILBOX) {
2091 igb_pf_mbx_process(dev);
2092 intr->flags &= ~E1000_FLAG_MAILBOX;
2095 igb_intr_enable(dev);
2096 rte_intr_enable(&(dev->pci_dev->intr_handle));
2098 if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
2099 intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
2101 /* set get_link_status to check register later */
2102 hw->mac.get_link_status = 1;
2103 ret = eth_igb_link_update(dev, 0);
2105 /* check if link has changed */
2109 memset(&link, 0, sizeof(link));
2110 rte_igb_dev_atomic_read_link_status(dev, &link);
2111 if (link.link_status) {
2113 " Port %d: Link Up - speed %u Mbps - %s",
2115 (unsigned)link.link_speed,
2116 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
2117 "full-duplex" : "half-duplex");
2119 PMD_INIT_LOG(INFO, " Port %d: Link Down",
2120 dev->data->port_id);
2123 PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
2124 dev->pci_dev->addr.domain,
2125 dev->pci_dev->addr.bus,
2126 dev->pci_dev->addr.devid,
2127 dev->pci_dev->addr.function);
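		/*
		 * Mirror the reported link state in hardware: the block below
		 * enables the transmit and receive units (TCTL.EN / RCTL.EN)
		 * on link up and disables them on link down.
		 */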
2128 tctl = E1000_READ_REG(hw, E1000_TCTL);
2129 rctl = E1000_READ_REG(hw, E1000_RCTL);
2130 if (link.link_status) {
2132 tctl |= E1000_TCTL_EN;
2133 rctl |= E1000_RCTL_EN;
2136 tctl &= ~E1000_TCTL_EN;
2137 rctl &= ~E1000_RCTL_EN;
2139 E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2140 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2141 E1000_WRITE_FLUSH(hw);
2142 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
2149 * Interrupt handler, which shall be registered first.
2152 * Pointer to interrupt handle.
2154 * The address of the parameter (struct rte_eth_dev *) registered before.
2160 eth_igb_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
2163 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2165 eth_igb_interrupt_get_status(dev);
2166 eth_igb_interrupt_action(dev);
2170 eth_igb_led_on(struct rte_eth_dev *dev)
2172 struct e1000_hw *hw;
2174 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2175 return (e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
2179 eth_igb_led_off(struct rte_eth_dev *dev)
2181 struct e1000_hw *hw;
2183 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2184 return (e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
2188 eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2190 struct e1000_hw *hw;
2195 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2196 fc_conf->pause_time = hw->fc.pause_time;
2197 fc_conf->high_water = hw->fc.high_water;
2198 fc_conf->low_water = hw->fc.low_water;
2199 fc_conf->send_xon = hw->fc.send_xon;
2200 fc_conf->autoneg = hw->mac.autoneg;
2203 * Return rx_pause and tx_pause status according to actual setting of
2204 * the TFCE and RFCE bits in the CTRL register.
2206 ctrl = E1000_READ_REG(hw, E1000_CTRL);
2207 if (ctrl & E1000_CTRL_TFCE)
2212 if (ctrl & E1000_CTRL_RFCE)
2217 if (rx_pause && tx_pause)
2218 fc_conf->mode = RTE_FC_FULL;
2220 fc_conf->mode = RTE_FC_RX_PAUSE;
2222 fc_conf->mode = RTE_FC_TX_PAUSE;
2224 fc_conf->mode = RTE_FC_NONE;
2230 eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2232 struct e1000_hw *hw;
2234 enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
2240 uint32_t rx_buf_size;
2241 uint32_t max_high_water;
2244 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2245 if (fc_conf->autoneg != hw->mac.autoneg)
2247 rx_buf_size = igb_get_rx_buffer_size(hw);
2248 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2250 /* Reserve at least one Ethernet frame for the watermark */
2251 max_high_water = rx_buf_size - ETHER_MAX_LEN;
2252 if ((fc_conf->high_water > max_high_water) ||
2253 (fc_conf->high_water < fc_conf->low_water)) {
2254 PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
2255 PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water);
2259 hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
2260 hw->fc.pause_time = fc_conf->pause_time;
2261 hw->fc.high_water = fc_conf->high_water;
2262 hw->fc.low_water = fc_conf->low_water;
2263 hw->fc.send_xon = fc_conf->send_xon;
2265 err = e1000_setup_link_generic(hw);
2266 if (err == E1000_SUCCESS) {
2268 /* check if we want to forward MAC frames - driver doesn't have native
2269 * capability to do that, so we'll write the registers ourselves */
2271 rctl = E1000_READ_REG(hw, E1000_RCTL);
2273 /* set or clear the RCTL.PMCF bit depending on configuration */
2274 if (fc_conf->mac_ctrl_frame_fwd != 0)
2275 rctl |= E1000_RCTL_PMCF;
2277 rctl &= ~E1000_RCTL_PMCF;
2279 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2280 E1000_WRITE_FLUSH(hw);
2285 PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
2289 #define E1000_RAH_POOLSEL_SHIFT (18)
2291 eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
2292 uint32_t index, __rte_unused uint32_t pool)
2294 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2297 e1000_rar_set(hw, mac_addr->addr_bytes, index);
2298 rah = E1000_READ_REG(hw, E1000_RAH(index));
2299 rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool));
2300 E1000_WRITE_REG(hw, E1000_RAH(index), rah);
2304 eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
2306 uint8_t addr[ETHER_ADDR_LEN];
2307 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2309 memset(addr, 0, sizeof(addr));
2311 e1000_rar_set(hw, addr, index);
2315 eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
2316 struct ether_addr *addr)
2318 eth_igb_rar_clear(dev, 0);
2320 eth_igb_rar_set(dev, (void *)addr, 0, 0);
2323 * Virtual Function operations
2326 igbvf_intr_disable(struct e1000_hw *hw)
2328 PMD_INIT_FUNC_TRACE();
2330 /* Clear the interrupt mask to stop interrupts from being generated */
2331 E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF);
2333 E1000_WRITE_FLUSH(hw);
2337 igbvf_stop_adapter(struct rte_eth_dev *dev)
2341 struct rte_eth_dev_info dev_info;
2342 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2344 memset(&dev_info, 0, sizeof(dev_info));
2345 eth_igbvf_infos_get(dev, &dev_info);
2347 /* Clear the interrupt mask to stop interrupts from being generated */
2348 igbvf_intr_disable(hw);
2350 /* Clear any pending interrupts, flush previous writes */
2351 E1000_READ_REG(hw, E1000_EICR);
2353 /* Disable the transmit unit. Each queue must be disabled. */
2354 for (i = 0; i < dev_info.max_tx_queues; i++)
2355 E1000_WRITE_REG(hw, E1000_TXDCTL(i), E1000_TXDCTL_SWFLSH);
2357 /* Disable the receive unit by stopping each queue */
2358 for (i = 0; i < dev_info.max_rx_queues; i++) {
2359 reg_val = E1000_READ_REG(hw, E1000_RXDCTL(i));
2360 reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE;
2361 E1000_WRITE_REG(hw, E1000_RXDCTL(i), reg_val);
2362 while (E1000_READ_REG(hw, E1000_RXDCTL(i)) & E1000_RXDCTL_QUEUE_ENABLE)
2366 /* flush all queue disables */
2367 E1000_WRITE_FLUSH(hw);
2371 static int eth_igbvf_link_update(struct e1000_hw *hw)
2373 struct e1000_mbx_info *mbx = &hw->mbx;
2374 struct e1000_mac_info *mac = &hw->mac;
2375 int ret_val = E1000_SUCCESS;
2377 PMD_INIT_LOG(DEBUG, "e1000_check_for_link_vf");
2380 * We only want to run this if a reset has been asserted.
2381 * In this case that could mean a link change, a device reset,
2382 * or a virtual function reset.
2385 /* If we were hit with a reset or timeout drop the link */
2386 if (!e1000_check_for_rst(hw, 0) || !mbx->timeout)
2387 mac->get_link_status = TRUE;
2389 if (!mac->get_link_status)
2392 /* if link status is down no point in checking to see if pf is up */
2393 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
2396 /* if we passed all the tests above then the link is up and we no
2397 * longer need to check for link */
2398 mac->get_link_status = FALSE;
2406 igbvf_dev_configure(struct rte_eth_dev *dev)
2408 struct rte_eth_conf* conf = &dev->data->dev_conf;
2410 PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
2411 dev->data->port_id);
2414 * The VF has no ability to enable/disable HW CRC stripping.
2415 * Keep the behavior consistent with the host PF.
2417 #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
2418 if (!conf->rxmode.hw_strip_crc) {
2419 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
2420 conf->rxmode.hw_strip_crc = 1;
2423 if (conf->rxmode.hw_strip_crc) {
2424 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
2425 conf->rxmode.hw_strip_crc = 0;
2433 igbvf_dev_start(struct rte_eth_dev *dev)
2435 struct e1000_hw *hw =
2436 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2437 struct e1000_adapter *adapter =
2438 E1000_DEV_PRIVATE(dev->data->dev_private);
2441 PMD_INIT_FUNC_TRACE();
2443 hw->mac.ops.reset_hw(hw);
2444 adapter->stopped = 0;
2447 igbvf_set_vfta_all(dev,1);
2449 eth_igbvf_tx_init(dev);
2451 /* This can fail when allocating mbufs for descriptor rings */
2452 ret = eth_igbvf_rx_init(dev);
2454 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
2455 igb_dev_clear_queues(dev);
2463 igbvf_dev_stop(struct rte_eth_dev *dev)
2465 PMD_INIT_FUNC_TRACE();
2467 igbvf_stop_adapter(dev);
2470 * Clear what we set, but keep shadow_vfta so it can be
2471 * restored after the device starts.
2473 igbvf_set_vfta_all(dev,0);
2475 igb_dev_clear_queues(dev);
2479 igbvf_dev_close(struct rte_eth_dev *dev)
2481 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2482 struct e1000_adapter *adapter =
2483 E1000_DEV_PRIVATE(dev->data->dev_private);
2485 PMD_INIT_FUNC_TRACE();
2489 igbvf_dev_stop(dev);
2490 adapter->stopped = 1;
2491 igb_dev_free_queues(dev);
2494 static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on)
2496 struct e1000_mbx_info *mbx = &hw->mbx;
2499 /* After setting a VLAN, VLAN stripping will also be enabled by the igb driver */
2500 msgbuf[0] = E1000_VF_SET_VLAN;
2502 /* Setting the 8-bit MSG INFO field to TRUE indicates "add" */
2504 msgbuf[0] |= E1000_VF_SET_VLAN_ADD;
2506 return (mbx->ops.write_posted(hw, msgbuf, 2, 0));
2509 static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on)
2511 struct e1000_hw *hw =
2512 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2513 struct e1000_vfta * shadow_vfta =
2514 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2515 int i = 0, j = 0, vfta = 0, mask = 1;
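	/*
	 * Walk the shadow VFTA: each set bit j in word i corresponds to
	 * VLAN id (i * 32 + j), which is (re)programmed through the PF
	 * mailbox below.
	 */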
2517 for (i = 0; i < IGB_VFTA_SIZE; i++){
2518 vfta = shadow_vfta->vfta[i];
2521 for (j = 0; j < 32; j++){
2524 (uint16_t)((i<<5)+j), on);
2533 igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2535 struct e1000_hw *hw =
2536 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2537 struct e1000_vfta * shadow_vfta =
2538 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2539 uint32_t vid_idx = 0;
2540 uint32_t vid_bit = 0;
2543 PMD_INIT_FUNC_TRACE();
2545 /* vind is not used in the VF driver; set it to 0 (see ixgbe_set_vfta_vf) */
2546 ret = igbvf_set_vfta(hw, vlan_id, !!on);
2548 PMD_INIT_LOG(ERR, "Unable to set VF vlan");
2551 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
2552 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
2554 /* Save what we set and restore it after a device reset */
2556 shadow_vfta->vfta[vid_idx] |= vid_bit;
2558 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
2564 igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr)
2566 struct e1000_hw *hw =
2567 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2569 /* index is not used by rar_set() */
2570 hw->mac.ops.rar_set(hw, (void *)addr, 0);
2575 eth_igb_rss_reta_update(struct rte_eth_dev *dev,
2576 struct rte_eth_rss_reta_entry64 *reta_conf,
2581 uint16_t idx, shift;
2582 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2584 if (reta_size != ETH_RSS_RETA_SIZE_128) {
2585 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2586 "(%d) doesn't match the number hardware can supported "
2587 "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
2591 for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
2592 idx = i / RTE_RETA_GROUP_SIZE;
2593 shift = i % RTE_RETA_GROUP_SIZE;
2594 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2598 if (mask == IGB_4_BIT_MASK)
2601 r = E1000_READ_REG(hw, E1000_RETA(i >> 2));
2602 for (j = 0, reta = 0; j < IGB_4_BIT_WIDTH; j++) {
2603 if (mask & (0x1 << j))
2604 reta |= reta_conf[idx].reta[shift + j] <<
2607 reta |= r & (IGB_8_BIT_MASK << (CHAR_BIT * j));
2609 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta);
2616 eth_igb_rss_reta_query(struct rte_eth_dev *dev,
2617 struct rte_eth_rss_reta_entry64 *reta_conf,
2622 uint16_t idx, shift;
2623 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2625 if (reta_size != ETH_RSS_RETA_SIZE_128) {
2626 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2627 "(%d) doesn't match the number hardware can supported "
2628 "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
2632 for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
2633 idx = i / RTE_RETA_GROUP_SIZE;
2634 shift = i % RTE_RETA_GROUP_SIZE;
2635 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2639 reta = E1000_READ_REG(hw, E1000_RETA(i >> 2));
2640 for (j = 0; j < IGB_4_BIT_WIDTH; j++) {
2641 if (mask & (0x1 << j))
2642 reta_conf[idx].reta[shift + j] =
2643 ((reta >> (CHAR_BIT * j)) &
2651 #define MAC_TYPE_FILTER_SUP(type) do {\
2652 if ((type) != e1000_82580 && (type) != e1000_i350 &&\
2653 (type) != e1000_82576)\
2658 eth_igb_syn_filter_set(struct rte_eth_dev *dev,
2659 struct rte_eth_syn_filter *filter,
2662 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2663 uint32_t synqf, rfctl;
2665 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
2668 synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
2671 if (synqf & E1000_SYN_FILTER_ENABLE)
2674 synqf = (uint32_t)(((filter->queue << E1000_SYN_FILTER_QUEUE_SHIFT) &
2675 E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE);
2677 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
2678 if (filter->hig_pri)
2679 rfctl |= E1000_RFCTL_SYNQFP;
2681 rfctl &= ~E1000_RFCTL_SYNQFP;
2683 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
2685 if (!(synqf & E1000_SYN_FILTER_ENABLE))
2690 E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
2691 E1000_WRITE_FLUSH(hw);
2696 eth_igb_syn_filter_get(struct rte_eth_dev *dev,
2697 struct rte_eth_syn_filter *filter)
2699 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2700 uint32_t synqf, rfctl;
2702 synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
2703 if (synqf & E1000_SYN_FILTER_ENABLE) {
2704 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
2705 filter->hig_pri = (rfctl & E1000_RFCTL_SYNQFP) ? 1 : 0;
2706 filter->queue = (uint8_t)((synqf & E1000_SYN_FILTER_QUEUE) >>
2707 E1000_SYN_FILTER_QUEUE_SHIFT);
2715 eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
2716 enum rte_filter_op filter_op,
2719 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2722 MAC_TYPE_FILTER_SUP(hw->mac.type);
2724 if (filter_op == RTE_ETH_FILTER_NOP)
2728 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
2733 switch (filter_op) {
2734 case RTE_ETH_FILTER_ADD:
2735 ret = eth_igb_syn_filter_set(dev,
2736 (struct rte_eth_syn_filter *)arg,
2739 case RTE_ETH_FILTER_DELETE:
2740 ret = eth_igb_syn_filter_set(dev,
2741 (struct rte_eth_syn_filter *)arg,
2744 case RTE_ETH_FILTER_GET:
2745 ret = eth_igb_syn_filter_get(dev,
2746 (struct rte_eth_syn_filter *)arg);
2749 PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
2757 #define MAC_TYPE_FILTER_SUP_EXT(type) do {\
2758 if ((type) != e1000_82580 && (type) != e1000_i350)\
2762 /* Translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info */
2764 ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter,
2765 struct e1000_2tuple_filter_info *filter_info)
2767 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
2769 if (filter->priority > E1000_2TUPLE_MAX_PRI)
2770 return -EINVAL; /* priority is out of range. */
2771 if (filter->tcp_flags > TCP_FLAG_ALL)
2772 return -EINVAL; /* flags are invalid. */
2774 switch (filter->dst_port_mask) {
2776 filter_info->dst_port_mask = 0;
2777 filter_info->dst_port = filter->dst_port;
2780 filter_info->dst_port_mask = 1;
2783 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
2787 switch (filter->proto_mask) {
2789 filter_info->proto_mask = 0;
2790 filter_info->proto = filter->proto;
2793 filter_info->proto_mask = 1;
2796 PMD_DRV_LOG(ERR, "invalid protocol mask.");
2800 filter_info->priority = (uint8_t)filter->priority;
2801 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
2802 filter_info->tcp_flags = filter->tcp_flags;
2804 filter_info->tcp_flags = 0;
2809 static inline struct e1000_2tuple_filter *
2810 igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list,
2811 struct e1000_2tuple_filter_info *key)
2813 struct e1000_2tuple_filter *it;
2815 TAILQ_FOREACH(it, filter_list, entries) {
2816 if (memcmp(key, &it->filter_info,
2817 sizeof(struct e1000_2tuple_filter_info)) == 0) {
2825 * igb_add_2tuple_filter - add a 2tuple filter
2828 * dev: Pointer to struct rte_eth_dev.
2829 * ntuple_filter: pointer to the filter that will be added.
2832 * - On success, zero.
2833 * - On failure, a negative value.
2836 igb_add_2tuple_filter(struct rte_eth_dev *dev,
2837 struct rte_eth_ntuple_filter *ntuple_filter)
2839 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2840 struct e1000_filter_info *filter_info =
2841 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2842 struct e1000_2tuple_filter *filter;
2843 uint32_t ttqf = E1000_TTQF_DISABLE_MASK;
2844 uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP;
2847 filter = rte_zmalloc("e1000_2tuple_filter",
2848 sizeof(struct e1000_2tuple_filter), 0);
2852 ret = ntuple_filter_to_2tuple(ntuple_filter,
2853 &filter->filter_info);
2858 if (igb_2tuple_filter_lookup(&filter_info->twotuple_list,
2859 &filter->filter_info) != NULL) {
2860 PMD_DRV_LOG(ERR, "filter exists.");
2864 filter->queue = ntuple_filter->queue;
2867 * look for an unused 2tuple filter index,
2868 * and insert the filter into the list.
2870 for (i = 0; i < E1000_MAX_TTQF_FILTERS; i++) {
2871 if (!(filter_info->twotuple_mask & (1 << i))) {
2872 filter_info->twotuple_mask |= 1 << i;
2874 TAILQ_INSERT_TAIL(&filter_info->twotuple_list,
2880 if (i >= E1000_MAX_TTQF_FILTERS) {
2881 PMD_DRV_LOG(ERR, "2tuple filters are full.");
2886 imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
2887 if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
2888 imir |= E1000_IMIR_PORT_BP;
2890 imir &= ~E1000_IMIR_PORT_BP;
2892 imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
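	/*
	 * TTQF carries the target queue, the protocol to match and the
	 * queue-enable bit for 2-tuple filter slot i; IMIR/IMIREXT hold
	 * the destination port, priority and TCP flag checks.
	 */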
2894 ttqf |= E1000_TTQF_QUEUE_ENABLE;
2895 ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT);
2896 ttqf |= (uint32_t)(filter->filter_info.proto & E1000_TTQF_PROTOCOL_MASK);
2897 if (filter->filter_info.proto_mask == 0)
2898 ttqf &= ~E1000_TTQF_MASK_ENABLE;
2900 /* TCP flag bits setting. */
2901 if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
2902 if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
2903 imir_ext |= E1000_IMIREXT_CTRL_URG;
2904 if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
2905 imir_ext |= E1000_IMIREXT_CTRL_ACK;
2906 if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
2907 imir_ext |= E1000_IMIREXT_CTRL_PSH;
2908 if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
2909 imir_ext |= E1000_IMIREXT_CTRL_RST;
2910 if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
2911 imir_ext |= E1000_IMIREXT_CTRL_SYN;
2912 if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
2913 imir_ext |= E1000_IMIREXT_CTRL_FIN;
2915 imir_ext |= E1000_IMIREXT_CTRL_BP;
2916 E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
2917 E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf);
2918 E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
2923 * igb_remove_2tuple_filter - remove a 2tuple filter
2926 * dev: Pointer to struct rte_eth_dev.
2927 * ntuple_filter: pointer to the filter that will be removed.
2930 * - On success, zero.
2931 * - On failure, a negative value.
2934 igb_remove_2tuple_filter(struct rte_eth_dev *dev,
2935 struct rte_eth_ntuple_filter *ntuple_filter)
2937 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2938 struct e1000_filter_info *filter_info =
2939 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2940 struct e1000_2tuple_filter_info filter_2tuple;
2941 struct e1000_2tuple_filter *filter;
2944 memset(&filter_2tuple, 0, sizeof(struct e1000_2tuple_filter_info));
2945 ret = ntuple_filter_to_2tuple(ntuple_filter,
2950 filter = igb_2tuple_filter_lookup(&filter_info->twotuple_list,
2952 if (filter == NULL) {
2953 PMD_DRV_LOG(ERR, "filter doesn't exist.");
2957 filter_info->twotuple_mask &= ~(1 << filter->index);
2958 TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries);
2961 E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK);
2962 E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
2963 E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
2967 static inline struct e1000_flex_filter *
2968 eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list,
2969 struct e1000_flex_filter_info *key)
2971 struct e1000_flex_filter *it;
2973 TAILQ_FOREACH(it, filter_list, entries) {
2974 if (memcmp(key, &it->filter_info,
2975 sizeof(struct e1000_flex_filter_info)) == 0)
2983 eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
2984 struct rte_eth_flex_filter *filter,
2987 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2988 struct e1000_filter_info *filter_info =
2989 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2990 struct e1000_flex_filter *flex_filter, *it;
2991 uint32_t wufc, queueing, mask;
2993 uint8_t shift, i, j = 0;
2995 flex_filter = rte_zmalloc("e1000_flex_filter",
2996 sizeof(struct e1000_flex_filter), 0);
2997 if (flex_filter == NULL)
3000 flex_filter->filter_info.len = filter->len;
3001 flex_filter->filter_info.priority = filter->priority;
3002 memcpy(flex_filter->filter_info.dwords, filter->bytes, filter->len);
3003 for (i = 0; i < RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT; i++) {
3005 /* reverse bits in flex filter's mask */
3006 for (shift = 0; shift < CHAR_BIT; shift++) {
3007 if (filter->mask[i] & (0x01 << shift))
3008 mask |= (0x80 >> shift);
3010 flex_filter->filter_info.mask[i] = mask;
3013 wufc = E1000_READ_REG(hw, E1000_WUFC);
3014 if (flex_filter->index < E1000_MAX_FHFT)
3015 reg_off = E1000_FHFT(flex_filter->index);
3017 reg_off = E1000_FHFT_EXT(flex_filter->index - E1000_MAX_FHFT);
3020 if (eth_igb_flex_filter_lookup(&filter_info->flex_list,
3021 &flex_filter->filter_info) != NULL) {
3022 PMD_DRV_LOG(ERR, "filter exists.");
3023 rte_free(flex_filter);
3026 flex_filter->queue = filter->queue;
3028 * look for an unused flex filter index
3029 * and insert the filter into the list.
3031 for (i = 0; i < E1000_MAX_FLEX_FILTERS; i++) {
3032 if (!(filter_info->flex_mask & (1 << i))) {
3033 filter_info->flex_mask |= 1 << i;
3034 flex_filter->index = i;
3035 TAILQ_INSERT_TAIL(&filter_info->flex_list,
3041 if (i >= E1000_MAX_FLEX_FILTERS) {
3042 PMD_DRV_LOG(ERR, "flex filters are full.");
3043 rte_free(flex_filter);
3047 E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
3048 (E1000_WUFC_FLX0 << flex_filter->index));
3049 queueing = filter->len |
3050 (filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
3051 (filter->priority << E1000_FHFT_QUEUEING_PRIO_SHIFT);
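	/*
	 * A note on the layout assumed below: each flex host filter table
	 * (FHFT) row appears to take two data dwords, one mask dword and
	 * one reserved dword, which is why the write loop advances by four
	 * dwords per mask byte.
	 */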
3052 E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET,
3054 for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) {
3055 E1000_WRITE_REG(hw, reg_off,
3056 flex_filter->filter_info.dwords[j]);
3057 reg_off += sizeof(uint32_t);
3058 E1000_WRITE_REG(hw, reg_off,
3059 flex_filter->filter_info.dwords[++j]);
3060 reg_off += sizeof(uint32_t);
3061 E1000_WRITE_REG(hw, reg_off,
3062 (uint32_t)flex_filter->filter_info.mask[i]);
3063 reg_off += sizeof(uint32_t) * 2;
3067 it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
3068 &flex_filter->filter_info);
3070 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3071 rte_free(flex_filter);
3075 for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
3076 E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);
3077 E1000_WRITE_REG(hw, E1000_WUFC, wufc &
3078 (~(E1000_WUFC_FLX0 << it->index)));
3080 filter_info->flex_mask &= ~(1 << it->index);
3081 TAILQ_REMOVE(&filter_info->flex_list, it, entries);
3083 rte_free(flex_filter);
3090 eth_igb_get_flex_filter(struct rte_eth_dev *dev,
3091 struct rte_eth_flex_filter *filter)
3093 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3094 struct e1000_filter_info *filter_info =
3095 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3096 struct e1000_flex_filter flex_filter, *it;
3097 uint32_t wufc, queueing, wufc_en = 0;
3099 memset(&flex_filter, 0, sizeof(struct e1000_flex_filter));
3100 flex_filter.filter_info.len = filter->len;
3101 flex_filter.filter_info.priority = filter->priority;
3102 memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len);
3103 memcpy(flex_filter.filter_info.mask, filter->mask,
3104 RTE_ALIGN(filter->len, sizeof(char)) / sizeof(char));
3106 it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
3107 &flex_filter.filter_info);
3109 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3113 wufc = E1000_READ_REG(hw, E1000_WUFC);
3114 wufc_en = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << it->index);
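	/*
	 * The filter is reported as active only when both the global flex
	 * filter enable (FLEX_HQ) and this filter's own enable bit are set
	 * in WUFC.
	 */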
3116 if ((wufc & wufc_en) == wufc_en) {
3117 uint32_t reg_off = 0;
3118 if (it->index < E1000_MAX_FHFT)
3119 reg_off = E1000_FHFT(it->index);
3121 reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);
3123 queueing = E1000_READ_REG(hw,
3124 reg_off + E1000_FHFT_QUEUEING_OFFSET);
3125 filter->len = queueing & E1000_FHFT_QUEUEING_LEN;
3126 filter->priority = (queueing & E1000_FHFT_QUEUEING_PRIO) >>
3127 E1000_FHFT_QUEUEING_PRIO_SHIFT;
3128 filter->queue = (queueing & E1000_FHFT_QUEUEING_QUEUE) >>
3129 E1000_FHFT_QUEUEING_QUEUE_SHIFT;
3136 eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
3137 enum rte_filter_op filter_op,
3140 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3141 struct rte_eth_flex_filter *filter;
3144 MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
3146 if (filter_op == RTE_ETH_FILTER_NOP)
3150 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
3155 filter = (struct rte_eth_flex_filter *)arg;
3156 if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN
3157 || filter->len % sizeof(uint64_t) != 0) {
3158 PMD_DRV_LOG(ERR, "filter's length is out of range");
3161 if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
3162 PMD_DRV_LOG(ERR, "filter's priority is out of range");
3166 switch (filter_op) {
3167 case RTE_ETH_FILTER_ADD:
3168 ret = eth_igb_add_del_flex_filter(dev, filter, TRUE);
3170 case RTE_ETH_FILTER_DELETE:
3171 ret = eth_igb_add_del_flex_filter(dev, filter, FALSE);
3173 case RTE_ETH_FILTER_GET:
3174 ret = eth_igb_get_flex_filter(dev, filter);
3177 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
3185 /* Translate elements in struct rte_eth_ntuple_filter to struct e1000_5tuple_filter_info */
3187 ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter,
3188 struct e1000_5tuple_filter_info *filter_info)
3190 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576)
3192 if (filter->priority > E1000_2TUPLE_MAX_PRI)
3193 return -EINVAL; /* priority is out of range. */
3194 if (filter->tcp_flags > TCP_FLAG_ALL)
3195 return -EINVAL; /* flags are invalid. */
3197 switch (filter->dst_ip_mask) {
3199 filter_info->dst_ip_mask = 0;
3200 filter_info->dst_ip = filter->dst_ip;
3203 filter_info->dst_ip_mask = 1;
3206 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
3210 switch (filter->src_ip_mask) {
3212 filter_info->src_ip_mask = 0;
3213 filter_info->src_ip = filter->src_ip;
3216 filter_info->src_ip_mask = 1;
3219 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
3223 switch (filter->dst_port_mask) {
3225 filter_info->dst_port_mask = 0;
3226 filter_info->dst_port = filter->dst_port;
3229 filter_info->dst_port_mask = 1;
3232 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
3236 switch (filter->src_port_mask) {
3238 filter_info->src_port_mask = 0;
3239 filter_info->src_port = filter->src_port;
3242 filter_info->src_port_mask = 1;
3245 PMD_DRV_LOG(ERR, "invalid src_port mask.");
3249 switch (filter->proto_mask) {
3251 filter_info->proto_mask = 0;
3252 filter_info->proto = filter->proto;
3255 filter_info->proto_mask = 1;
3258 PMD_DRV_LOG(ERR, "invalid protocol mask.");
3262 filter_info->priority = (uint8_t)filter->priority;
3263 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
3264 filter_info->tcp_flags = filter->tcp_flags;
3266 filter_info->tcp_flags = 0;
3271 static inline struct e1000_5tuple_filter *
3272 igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list,
3273 struct e1000_5tuple_filter_info *key)
3275 struct e1000_5tuple_filter *it;
3277 TAILQ_FOREACH(it, filter_list, entries) {
3278 if (memcmp(key, &it->filter_info,
3279 sizeof(struct e1000_5tuple_filter_info)) == 0) {
3287 * igb_add_5tuple_filter_82576 - add a 5tuple filter
3290 * dev: Pointer to struct rte_eth_dev.
3291 * ntuple_filter: pointer to the filter that will be added.
3294 * - On success, zero.
3295 * - On failure, a negative value.
3298 igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
3299 struct rte_eth_ntuple_filter *ntuple_filter)
3301 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3302 struct e1000_filter_info *filter_info =
3303 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3304 struct e1000_5tuple_filter *filter;
3305 uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK;
3306 uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP;
3310 filter = rte_zmalloc("e1000_5tuple_filter",
3311 sizeof(struct e1000_5tuple_filter), 0);
3315 ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
3316 &filter->filter_info);
3322 if (igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
3323 &filter->filter_info) != NULL) {
3324 PMD_DRV_LOG(ERR, "filter exists.");
3328 filter->queue = ntuple_filter->queue;
3331 * look for an unused 5tuple filter index,
3332 * and insert the filter into the list.
3334 for (i = 0; i < E1000_MAX_FTQF_FILTERS; i++) {
3335 if (!(filter_info->fivetuple_mask & (1 << i))) {
3336 filter_info->fivetuple_mask |= 1 << i;
3338 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
3344 if (i >= E1000_MAX_FTQF_FILTERS) {
3345 PMD_DRV_LOG(ERR, "5tuple filters are full.");
3350 ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK;
3351 if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. */
3352 ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP;
3353 if (filter->filter_info.dst_ip_mask == 0)
3354 ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP;
3355 if (filter->filter_info.src_port_mask == 0)
3356 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
3357 if (filter->filter_info.proto_mask == 0)
3358 ftqf &= ~E1000_FTQF_MASK_PROTO_BP;
3359 ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) &
3360 E1000_FTQF_QUEUE_MASK;
3361 ftqf |= E1000_FTQF_QUEUE_ENABLE;
3362 E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf);
3363 E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip);
3364 E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip);
3366 spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT;
3367 E1000_WRITE_REG(hw, E1000_SPQF(i), spqf);
3369 imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
3370 if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
3371 imir |= E1000_IMIR_PORT_BP;
3373 imir &= ~E1000_IMIR_PORT_BP;
3374 imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
3376 /* TCP flag bits setting. */
3377 if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
3378 if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
3379 imir_ext |= E1000_IMIREXT_CTRL_URG;
3380 if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
3381 imir_ext |= E1000_IMIREXT_CTRL_ACK;
3382 if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
3383 imir_ext |= E1000_IMIREXT_CTRL_PSH;
3384 if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
3385 imir_ext |= E1000_IMIREXT_CTRL_RST;
3386 if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
3387 imir_ext |= E1000_IMIREXT_CTRL_SYN;
3388 if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
3389 imir_ext |= E1000_IMIREXT_CTRL_FIN;
3391 imir_ext |= E1000_IMIREXT_CTRL_BP;
3392 E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
3393 E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
3398 * igb_remove_5tuple_filter_82576 - remove a 5tuple filter
3401 * dev: Pointer to struct rte_eth_dev.
3402 * ntuple_filter: pointer to the filter that will be removed.
3405 * - On success, zero.
3406 * - On failure, a negative value.
3409 igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
3410 struct rte_eth_ntuple_filter *ntuple_filter)
3412 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3413 struct e1000_filter_info *filter_info =
3414 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3415 struct e1000_5tuple_filter_info filter_5tuple;
3416 struct e1000_5tuple_filter *filter;
3419 memset(&filter_5tuple, 0, sizeof(struct e1000_5tuple_filter_info));
3420 ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
3425 filter = igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
3427 if (filter == NULL) {
3428 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3432 filter_info->fivetuple_mask &= ~(1 << filter->index);
3433 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
3436 E1000_WRITE_REG(hw, E1000_FTQF(filter->index),
3437 E1000_FTQF_VF_BP | E1000_FTQF_MASK);
3438 E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0);
3439 E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0);
3440 E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0);
3441 E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
3442 E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
3447 eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3450 struct e1000_hw *hw;
3451 struct rte_eth_dev_info dev_info;
3452 uint32_t frame_size = mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN +
3455 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3457 #ifdef RTE_LIBRTE_82571_SUPPORT
3458 /* XXX: not bigger than max_rx_pktlen */
3459 if (hw->mac.type == e1000_82571)
3462 eth_igb_infos_get(dev, &dev_info);
3464 /* check that mtu is within the allowed range */
3465 if ((mtu < ETHER_MIN_MTU) ||
3466 (frame_size > dev_info.max_rx_pktlen))
3469 /* Refuse an MTU that requires scattered packet support when this
3470 * feature has not been enabled before. */
3471 if (!dev->data->scattered_rx &&
3472 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
3475 rctl = E1000_READ_REG(hw, E1000_RCTL);
3477 /* switch to jumbo mode if needed */
3478 if (frame_size > ETHER_MAX_LEN) {
3479 dev->data->dev_conf.rxmode.jumbo_frame = 1;
3480 rctl |= E1000_RCTL_LPE;
3482 dev->data->dev_conf.rxmode.jumbo_frame = 0;
3483 rctl &= ~E1000_RCTL_LPE;
3485 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
3487 /* update max frame size */
3488 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
3490 E1000_WRITE_REG(hw, E1000_RLPML,
3491 dev->data->dev_conf.rxmode.max_rx_pkt_len);
3497 * igb_add_del_ntuple_filter - add or delete an ntuple filter
3500 * dev: Pointer to struct rte_eth_dev.
3501 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
3502 * add: if true, add the filter; if false, remove it.
3505 * - On success, zero.
3506 * - On failure, a negative value.
3509 igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
3510 struct rte_eth_ntuple_filter *ntuple_filter,
3513 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3516 switch (ntuple_filter->flags) {
3517 case RTE_5TUPLE_FLAGS:
3518 case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
3519 if (hw->mac.type != e1000_82576)
3522 ret = igb_add_5tuple_filter_82576(dev,
3525 ret = igb_remove_5tuple_filter_82576(dev,
3528 case RTE_2TUPLE_FLAGS:
3529 case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
3530 if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
3533 ret = igb_add_2tuple_filter(dev, ntuple_filter);
3535 ret = igb_remove_2tuple_filter(dev, ntuple_filter);
3546 * igb_get_ntuple_filter - get an ntuple filter
3549 * dev: Pointer to struct rte_eth_dev.
3550 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
3553 * - On success, zero.
3554 * - On failure, a negative value.
3557 igb_get_ntuple_filter(struct rte_eth_dev *dev,
3558 struct rte_eth_ntuple_filter *ntuple_filter)
3560 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3561 struct e1000_filter_info *filter_info =
3562 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3563 struct e1000_5tuple_filter_info filter_5tuple;
3564 struct e1000_2tuple_filter_info filter_2tuple;
3565 struct e1000_5tuple_filter *p_5tuple_filter;
3566 struct e1000_2tuple_filter *p_2tuple_filter;
3569 switch (ntuple_filter->flags) {
3570 case RTE_5TUPLE_FLAGS:
3571 case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
3572 if (hw->mac.type != e1000_82576)
3574 memset(&filter_5tuple,
3576 sizeof(struct e1000_5tuple_filter_info));
3577 ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
3581 p_5tuple_filter = igb_5tuple_filter_lookup_82576(
3582 &filter_info->fivetuple_list,
3584 if (p_5tuple_filter == NULL) {
3585 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3588 ntuple_filter->queue = p_5tuple_filter->queue;
3590 case RTE_2TUPLE_FLAGS:
3591 case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
3592 if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
3594 memset(&filter_2tuple,
3596 sizeof(struct e1000_2tuple_filter_info));
3597 ret = ntuple_filter_to_2tuple(ntuple_filter, &filter_2tuple);
3600 p_2tuple_filter = igb_2tuple_filter_lookup(
3601 &filter_info->twotuple_list,
3603 if (p_2tuple_filter == NULL) {
3604 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3607 ntuple_filter->queue = p_2tuple_filter->queue;
3618 * igb_ntuple_filter_handle - Handle operations for ntuple filter.
3619 * @dev: pointer to rte_eth_dev structure
3620 * @filter_op: operation to be taken.
3621 * @arg: a pointer to the specific structure corresponding to filter_op
3624 igb_ntuple_filter_handle(struct rte_eth_dev *dev,
3625 enum rte_filter_op filter_op,
3628 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3631 MAC_TYPE_FILTER_SUP(hw->mac.type);
3633 if (filter_op == RTE_ETH_FILTER_NOP)
3637 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
3642 switch (filter_op) {
3643 case RTE_ETH_FILTER_ADD:
3644 ret = igb_add_del_ntuple_filter(dev,
3645 (struct rte_eth_ntuple_filter *)arg,
3648 case RTE_ETH_FILTER_DELETE:
3649 ret = igb_add_del_ntuple_filter(dev,
3650 (struct rte_eth_ntuple_filter *)arg,
3653 case RTE_ETH_FILTER_GET:
3654 ret = igb_get_ntuple_filter(dev,
3655 (struct rte_eth_ntuple_filter *)arg);
3658 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
3666 igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info,
3671 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
3672 if (filter_info->ethertype_filters[i] == ethertype &&
3673 (filter_info->ethertype_mask & (1 << i)))
3680 igb_ethertype_filter_insert(struct e1000_filter_info *filter_info,
3685 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
3686 if (!(filter_info->ethertype_mask & (1 << i))) {
3687 filter_info->ethertype_mask |= 1 << i;
3688 filter_info->ethertype_filters[i] = ethertype;
3696 igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
3699 if (idx >= E1000_MAX_ETQF_FILTERS)
3701 filter_info->ethertype_mask &= ~(1 << idx);
3702 filter_info->ethertype_filters[idx] = 0;
3708 igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
3709 struct rte_eth_ethertype_filter *filter,
3712 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3713 struct e1000_filter_info *filter_info =
3714 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3718 if (filter->ether_type == ETHER_TYPE_IPv4 ||
3719 filter->ether_type == ETHER_TYPE_IPv6) {
3720 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
3721 " ethertype filter.", filter->ether_type);
3725 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
3726 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
3729 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
3730 PMD_DRV_LOG(ERR, "drop option is unsupported.");
3734 ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
3735 if (ret >= 0 && add) {
3736 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
3737 filter->ether_type);
3740 if (ret < 0 && !add) {
3741 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
3742 filter->ether_type);
3747 ret = igb_ethertype_filter_insert(filter_info,
3748 filter->ether_type);
3750 PMD_DRV_LOG(ERR, "ethertype filters are full.");
3754 etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
3755 etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE);
3756 etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT;
3758 ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret);
3762 E1000_WRITE_REG(hw, E1000_ETQF(ret), etqf);
3763 E1000_WRITE_FLUSH(hw);
3769 igb_get_ethertype_filter(struct rte_eth_dev *dev,
3770 struct rte_eth_ethertype_filter *filter)
3772 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3773 struct e1000_filter_info *filter_info =
3774 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3778 ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
3780 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
3781 filter->ether_type);
3785 etqf = E1000_READ_REG(hw, E1000_ETQF(ret));
3786 if (etqf & E1000_ETQF_FILTER_ENABLE) {
3787 filter->ether_type = etqf & E1000_ETQF_ETHERTYPE;
3789 filter->queue = (etqf & E1000_ETQF_QUEUE) >>
3790 E1000_ETQF_QUEUE_SHIFT;
3798 * igb_ethertype_filter_handle - Handle operations for ethertype filter.
3799 * @dev: pointer to rte_eth_dev structure
3800 * @filter_op: operation to be taken.
3801 * @arg: a pointer to the specific structure corresponding to filter_op
3804 igb_ethertype_filter_handle(struct rte_eth_dev *dev,
3805 enum rte_filter_op filter_op,
3808 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3811 MAC_TYPE_FILTER_SUP(hw->mac.type);
3813 if (filter_op == RTE_ETH_FILTER_NOP)
3817 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
3822 switch (filter_op) {
3823 case RTE_ETH_FILTER_ADD:
3824 ret = igb_add_del_ethertype_filter(dev,
3825 (struct rte_eth_ethertype_filter *)arg,
3828 case RTE_ETH_FILTER_DELETE:
3829 ret = igb_add_del_ethertype_filter(dev,
3830 (struct rte_eth_ethertype_filter *)arg,
3833 case RTE_ETH_FILTER_GET:
3834 ret = igb_get_ethertype_filter(dev,
3835 (struct rte_eth_ethertype_filter *)arg);
3838 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
3846 eth_igb_filter_ctrl(struct rte_eth_dev *dev,
3847 enum rte_filter_type filter_type,
3848 enum rte_filter_op filter_op,
3853 switch (filter_type) {
3854 case RTE_ETH_FILTER_NTUPLE:
3855 ret = igb_ntuple_filter_handle(dev, filter_op, arg);
3857 case RTE_ETH_FILTER_ETHERTYPE:
3858 ret = igb_ethertype_filter_handle(dev, filter_op, arg);
3860 case RTE_ETH_FILTER_SYN:
3861 ret = eth_igb_syn_filter_handle(dev, filter_op, arg);
3863 case RTE_ETH_FILTER_FLEXIBLE:
3864 ret = eth_igb_flex_filter_handle(dev, filter_op, arg);
3867 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3876 eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
3877 struct ether_addr *mc_addr_set,
3878 uint32_t nb_mc_addr)
3880 struct e1000_hw *hw;
3882 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3883 e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
3888 igb_timesync_enable(struct rte_eth_dev *dev)
3890 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3894 /* Enable system time, as it isn't on by default. */
3895 tsauxc = E1000_READ_REG(hw, E1000_TSAUXC);
3896 tsauxc &= ~E1000_TSAUXC_DISABLE_SYSTIME;
3897 E1000_WRITE_REG(hw, E1000_TSAUXC, tsauxc);
3899 /* Start incrementing the register used to timestamp PTP packets. */
3900 E1000_WRITE_REG(hw, E1000_TIMINCA, E1000_TIMINCA_INIT);
3902 /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
3903 E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588),
3905 E1000_ETQF_FILTER_ENABLE |
3908 /* Enable timestamping of received PTP packets. */
3909 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
3910 tsync_ctl |= E1000_TSYNCRXCTL_ENABLED;
3911 E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);
3913 /* Enable Timestamping of transmitted PTP packets. */
3914 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
3915 tsync_ctl |= E1000_TSYNCTXCTL_ENABLED;
3916 E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);
3922 igb_timesync_disable(struct rte_eth_dev *dev)
3924 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3927 /* Disable timestamping of transmitted PTP packets. */
3928 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
3929 tsync_ctl &= ~E1000_TSYNCTXCTL_ENABLED;
3930 E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);
3932 /* Disable timestamping of received PTP packets. */
3933 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
3934 tsync_ctl &= ~E1000_TSYNCRXCTL_ENABLED;
3935 E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);
3937 /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
3938 E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), 0);
3940 /* Stop incrementing the System Time registers. */
3941 E1000_WRITE_REG(hw, E1000_TIMINCA, 0);
3947 igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
3948 struct timespec *timestamp,
3949 uint32_t flags __rte_unused)
3951 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3952 uint32_t tsync_rxctl;
3956 tsync_rxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
3957 if ((tsync_rxctl & E1000_TSYNCRXCTL_VALID) == 0)
3960 rx_stmpl = E1000_READ_REG(hw, E1000_RXSTMPL);
3961 rx_stmph = E1000_READ_REG(hw, E1000_RXSTMPH);
3963 timestamp->tv_sec = (uint64_t)(((uint64_t)rx_stmph << 32) | rx_stmpl);
3964 timestamp->tv_nsec = 0;
3970 igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
3971 struct timespec *timestamp)
3973 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3974 uint32_t tsync_txctl;
3978 tsync_txctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
3979 if ((tsync_txctl & E1000_TSYNCTXCTL_VALID) == 0)
3982 tx_stmpl = E1000_READ_REG(hw, E1000_TXSTMPL);
3983 tx_stmph = E1000_READ_REG(hw, E1000_TXSTMPH);
3985 timestamp->tv_sec = (uint64_t)(((uint64_t)tx_stmph << 32) | tx_stmpl);
3986 timestamp->tv_nsec = 0;
3992 eth_igb_get_reg_length(struct rte_eth_dev *dev __rte_unused)
3996 const struct reg_info *reg_group;
3998 while ((reg_group = igb_regs[g_ind++]))
3999 count += igb_reg_group_count(reg_group);
4005 igbvf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
4009 const struct reg_info *reg_group;
4011 while ((reg_group = igbvf_regs[g_ind++]))
4012 count += igb_reg_group_count(reg_group);
4018 eth_igb_get_regs(struct rte_eth_dev *dev,
4019 struct rte_dev_reg_info *regs)
4021 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4022 uint32_t *data = regs->data;
4025 const struct reg_info *reg_group;
4027 /* Support only full register dump */
4028 if ((regs->length == 0) ||
4029 (regs->length == (uint32_t)eth_igb_get_reg_length(dev))) {
4030 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
4032 while ((reg_group = igb_regs[g_ind++]))
4033 count += igb_read_regs_group(dev, &data[count],
4042 igbvf_get_regs(struct rte_eth_dev *dev,
4043 struct rte_dev_reg_info *regs)
4045 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4046 uint32_t *data = regs->data;
4049 const struct reg_info *reg_group;
4051 /* Support only full register dump */
4052 if ((regs->length == 0) ||
4053 (regs->length == (uint32_t)igbvf_get_reg_length(dev))) {
4054 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
4056 while ((reg_group = igbvf_regs[g_ind++]))
4057 count += igb_read_regs_group(dev, &data[count],
4066 eth_igb_get_eeprom_length(struct rte_eth_dev *dev)
4068 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4070 /* Return unit is byte count */
4071 return hw->nvm.word_size * 2;
4075 eth_igb_get_eeprom(struct rte_eth_dev *dev,
4076 struct rte_dev_eeprom_info *in_eeprom)
4078 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4079 struct e1000_nvm_info *nvm = &hw->nvm;
4080 uint16_t *data = in_eeprom->data;
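	/* The NVM is word (16-bit) addressed, so byte offsets and lengths
	 * from the request are converted to word units below.
	 */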
4083 first = in_eeprom->offset >> 1;
4084 length = in_eeprom->length >> 1;
4085 if ((first >= hw->nvm.word_size) ||
4086 ((first + length) >= hw->nvm.word_size))
4089 in_eeprom->magic = hw->vendor_id |
4090 ((uint32_t)hw->device_id << 16);
4092 if ((nvm->ops.read) == NULL)
4095 return nvm->ops.read(hw, first, length, data);
4099 eth_igb_set_eeprom(struct rte_eth_dev *dev,
4100 struct rte_dev_eeprom_info *in_eeprom)
4102 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4103 struct e1000_nvm_info *nvm = &hw->nvm;
4104 uint16_t *data = in_eeprom->data;
4107 first = in_eeprom->offset >> 1;
4108 length = in_eeprom->length >> 1;
4109 if ((first >= hw->nvm.word_size) ||
4110 ((first + length) >= hw->nvm.word_size))
4113 in_eeprom->magic = (uint32_t)hw->vendor_id |
4114 ((uint32_t)hw->device_id << 16);
4116 if ((nvm->ops.write) == NULL)
4118 return nvm->ops.write(hw, first, length, data);
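/*
 * Illustrative usage sketch (an assumption, not part of this driver): an
 * application would normally reach the two EEPROM callbacks above through
 * the generic ethdev API, roughly as follows:
 *
 *	struct rte_dev_eeprom_info info;
 *	int len = rte_eth_dev_get_eeprom_length(port_id);
 *
 *	if (len > 0) {
 *		info.offset = 0;
 *		info.length = len;
 *		info.data = malloc(len);
 *		if (info.data != NULL &&
 *		    rte_eth_dev_get_eeprom(port_id, &info) == 0)
 *			; /* info.data now holds the NVM contents */
 *	}
 */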
4121 static struct rte_driver pmd_igb_drv = {
4123 .init = rte_igb_pmd_init,
4126 static struct rte_driver pmd_igbvf_drv = {
4128 .init = rte_igbvf_pmd_init,
4132 eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
4134 struct e1000_hw *hw =
4135 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4136 uint32_t mask = 1 << queue_id;
4138 E1000_WRITE_REG(hw, E1000_EIMC, mask);
4139 E1000_WRITE_FLUSH(hw);
4145 eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
4147 struct e1000_hw *hw =
4148 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4149 uint32_t mask = 1 << queue_id;
4152 regval = E1000_READ_REG(hw, E1000_EIMS);
4153 E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
4154 E1000_WRITE_FLUSH(hw);
4156 rte_intr_enable(&dev->pci_dev->intr_handle);
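/*
 * Update one byte of an IVAR0 array register: clear the old mapping at
 * 'offset' and write the MSI-X vector with the valid bit set, so the given
 * queue cause is routed to that vector.
 */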
4162 eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
4163 uint8_t index, uint8_t offset)
4165 uint32_t val = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
4168 val &= ~((uint32_t)0xFF << offset);
4170 /* write vector and valid bit */
4171 val |= (msix_vector | E1000_IVAR_VALID) << offset;
4173 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, val);
4177 eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
4178 uint8_t queue, uint8_t msix_vector)
4182 if (hw->mac.type == e1000_82575) {
4184 tmp = E1000_EICR_RX_QUEUE0 << queue;
4185 else if (direction == 1)
4186 tmp = E1000_EICR_TX_QUEUE0 << queue;
4187 E1000_WRITE_REG(hw, E1000_MSIXBM(msix_vector), tmp);
4188 } else if (hw->mac.type == e1000_82576) {
4189 if ((direction == 0) || (direction == 1))
4190 eth_igb_write_ivar(hw, msix_vector, queue & 0x7,
4191 ((queue & 0x8) << 1) +
4193 } else if ((hw->mac.type == e1000_82580) ||
4194 (hw->mac.type == e1000_i350) ||
4195 (hw->mac.type == e1000_i354) ||
4196 (hw->mac.type == e1000_i210) ||
4197 (hw->mac.type == e1000_i211)) {
4198 if ((direction == 0) || (direction == 1))
4199 eth_igb_write_ivar(hw, msix_vector,
4201 ((queue & 0x1) << 4) +
4206 /* Sets up the hardware to properly generate MSI-X interrupts
4208 * board private structure
4211 eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
4214 uint32_t tmpval, regval, intr_mask;
4215 struct e1000_hw *hw =
4216 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4218 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
4220 /* Don't configure MSI-X registers if no mapping has been done
4221 * between interrupt vectors and event fds
4223 if (!rte_intr_dp_is_en(intr_handle))
4226 /* set interrupt vector for other causes */
4227 if (hw->mac.type == e1000_82575) {
4228 tmpval = E1000_READ_REG(hw, E1000_CTRL_EXT);
4229 /* enable MSI-X PBA support */
4230 tmpval |= E1000_CTRL_EXT_PBA_CLR;
4232 /* Auto-Mask interrupts upon ICR read */
4233 tmpval |= E1000_CTRL_EXT_EIAME;
4234 tmpval |= E1000_CTRL_EXT_IRCA;
4236 E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmpval);
4238 /* enable msix_other interrupt */
4239 E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), 0, E1000_EIMS_OTHER);
4240 regval = E1000_READ_REG(hw, E1000_EIAC);
4241 E1000_WRITE_REG(hw, E1000_EIAC, regval | E1000_EIMS_OTHER);
4242 regval = E1000_READ_REG(hw, E1000_EIAM);
4243 E1000_WRITE_REG(hw, E1000_EIMS, regval | E1000_EIMS_OTHER);
4244 } else if ((hw->mac.type == e1000_82576) ||
4245 (hw->mac.type == e1000_82580) ||
4246 (hw->mac.type == e1000_i350) ||
4247 (hw->mac.type == e1000_i354) ||
4248 (hw->mac.type == e1000_i210) ||
4249 (hw->mac.type == e1000_i211)) {
4250 /* turn on MSI-X capability first */
4251 E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE |
4252 E1000_GPIE_PBA | E1000_GPIE_EIAME |
4255 intr_mask = (1 << intr_handle->max_intr) - 1;
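		/*
		 * EIAC appears to mark these extended causes as auto-cleared
		 * when the interrupt is asserted, so the handler does not
		 * need to clear them explicitly; EIMS unmasks them.
		 */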
4256 regval = E1000_READ_REG(hw, E1000_EIAC);
4257 E1000_WRITE_REG(hw, E1000_EIAC, regval | intr_mask);
4259 /* enable msix_other interrupt */
4260 regval = E1000_READ_REG(hw, E1000_EIMS);
4261 E1000_WRITE_REG(hw, E1000_EIMS, regval | intr_mask);
4262 tmpval = (dev->data->nb_rx_queues | E1000_IVAR_VALID) << 8;
4263 E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmpval);
4266 /* use EIAM to auto-mask when an MSI-X interrupt
4267 * is asserted; this saves a register write for every interrupt
4269 intr_mask = (1 << intr_handle->nb_efd) - 1;
4270 regval = E1000_READ_REG(hw, E1000_EIAM);
4271 E1000_WRITE_REG(hw, E1000_EIAM, regval | intr_mask);
4273 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
4274 eth_igb_assign_msix_vector(hw, 0, queue_id, vec);
4275 intr_handle->intr_vec[queue_id] = vec;
4276 if (vec < intr_handle->nb_efd - 1)
4280 E1000_WRITE_FLUSH(hw);
4283 PMD_REGISTER_DRIVER(pmd_igb_drv);
4284 PMD_REGISTER_DRIVER(pmd_igbvf_drv);