4 * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
40 #include <rte_common.h>
41 #include <rte_interrupts.h>
42 #include <rte_byteorder.h>
44 #include <rte_debug.h>
46 #include <rte_ether.h>
47 #include <rte_ethdev.h>
48 #include <rte_memory.h>
49 #include <rte_memzone.h>
51 #include <rte_atomic.h>
52 #include <rte_malloc.h>
55 #include "e1000_logs.h"
56 #include "base/e1000_api.h"
57 #include "e1000_ethdev.h"
61 * Default values for port configuration
63 #define IGB_DEFAULT_RX_FREE_THRESH 32
64 #define IGB_DEFAULT_RX_PTHRESH 8
65 #define IGB_DEFAULT_RX_HTHRESH 8
66 #define IGB_DEFAULT_RX_WTHRESH 0
68 #define IGB_DEFAULT_TX_PTHRESH 32
69 #define IGB_DEFAULT_TX_HTHRESH 0
70 #define IGB_DEFAULT_TX_WTHRESH 0
72 #define IGB_HKEY_MAX_INDEX 10
74 /* Bit shift and mask */
75 #define IGB_4_BIT_WIDTH (CHAR_BIT / 2)
76 #define IGB_4_BIT_MASK RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
77 #define IGB_8_BIT_WIDTH CHAR_BIT
78 #define IGB_8_BIT_MASK UINT8_MAX
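/* With CHAR_BIT == 8 these come out as widths of 4 and 8 bits with masks 0x0F
 * and 0xFF; the 4-bit variants are meant for register fields packed in
 * nibbles, e.g. when RSS RETA entries are updated four per register.
 */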
80 /* Additional timesync values. */
81 #define E1000_ETQF_FILTER_1588 3
82 #define E1000_TIMINCA_INCVALUE 16000000
83 #define E1000_TIMINCA_INIT ((0x02 << E1000_TIMINCA_16NS_SHIFT) \
84 | E1000_TIMINCA_INCVALUE)
85 #define E1000_TSAUXC_DISABLE_SYSTIME 0x80000000
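/* Note: TIMINCA combines an increment period (0x02, shifted into place by
 * E1000_TIMINCA_16NS_SHIFT) with the increment value above; the SYSTIM
 * resolution this produces depends on the MAC type.
 */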
87 static int eth_igb_configure(struct rte_eth_dev *dev);
88 static int eth_igb_start(struct rte_eth_dev *dev);
89 static void eth_igb_stop(struct rte_eth_dev *dev);
90 static void eth_igb_close(struct rte_eth_dev *dev);
91 static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
92 static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
93 static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
94 static void eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
95 static int eth_igb_link_update(struct rte_eth_dev *dev,
96 int wait_to_complete);
97 static void eth_igb_stats_get(struct rte_eth_dev *dev,
98 struct rte_eth_stats *rte_stats);
99 static void eth_igb_stats_reset(struct rte_eth_dev *dev);
100 static void eth_igb_infos_get(struct rte_eth_dev *dev,
101 struct rte_eth_dev_info *dev_info);
102 static void eth_igbvf_infos_get(struct rte_eth_dev *dev,
103 struct rte_eth_dev_info *dev_info);
104 static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
105 struct rte_eth_fc_conf *fc_conf);
106 static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
107 struct rte_eth_fc_conf *fc_conf);
108 static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
109 static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
110 static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
111 static int eth_igb_interrupt_action(struct rte_eth_dev *dev);
112 static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
114 static int igb_hardware_init(struct e1000_hw *hw);
115 static void igb_hw_control_acquire(struct e1000_hw *hw);
116 static void igb_hw_control_release(struct e1000_hw *hw);
117 static void igb_init_manageability(struct e1000_hw *hw);
118 static void igb_release_manageability(struct e1000_hw *hw);
120 static int eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
122 static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
123 uint16_t vlan_id, int on);
124 static void eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
125 static void eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);
127 static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
128 static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
129 static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev);
130 static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev);
131 static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev);
132 static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev);
134 static int eth_igb_led_on(struct rte_eth_dev *dev);
135 static int eth_igb_led_off(struct rte_eth_dev *dev);
137 static void igb_intr_disable(struct e1000_hw *hw);
138 static int igb_get_rx_buffer_size(struct e1000_hw *hw);
139 static void eth_igb_rar_set(struct rte_eth_dev *dev,
140 struct ether_addr *mac_addr,
141 uint32_t index, uint32_t pool);
142 static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
143 static void eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
144 struct ether_addr *addr);
146 static void igbvf_intr_disable(struct e1000_hw *hw);
147 static int igbvf_dev_configure(struct rte_eth_dev *dev);
148 static int igbvf_dev_start(struct rte_eth_dev *dev);
149 static void igbvf_dev_stop(struct rte_eth_dev *dev);
150 static void igbvf_dev_close(struct rte_eth_dev *dev);
151 static int eth_igbvf_link_update(struct e1000_hw *hw);
152 static void eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats);
153 static void eth_igbvf_stats_reset(struct rte_eth_dev *dev);
154 static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
155 uint16_t vlan_id, int on);
156 static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
157 static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
158 static void igbvf_default_mac_addr_set(struct rte_eth_dev *dev,
159 struct ether_addr *addr);
160 static int igbvf_get_reg_length(struct rte_eth_dev *dev);
161 static int igbvf_get_regs(struct rte_eth_dev *dev,
162 struct rte_dev_reg_info *regs);
164 static int eth_igb_rss_reta_update(struct rte_eth_dev *dev,
165 struct rte_eth_rss_reta_entry64 *reta_conf,
167 static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
168 struct rte_eth_rss_reta_entry64 *reta_conf,
171 static int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
172 struct rte_eth_syn_filter *filter,
174 static int eth_igb_syn_filter_get(struct rte_eth_dev *dev,
175 struct rte_eth_syn_filter *filter);
176 static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
177 enum rte_filter_op filter_op,
179 static int igb_add_2tuple_filter(struct rte_eth_dev *dev,
180 struct rte_eth_ntuple_filter *ntuple_filter);
181 static int igb_remove_2tuple_filter(struct rte_eth_dev *dev,
182 struct rte_eth_ntuple_filter *ntuple_filter);
183 static int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
184 struct rte_eth_flex_filter *filter,
186 static int eth_igb_get_flex_filter(struct rte_eth_dev *dev,
187 struct rte_eth_flex_filter *filter);
188 static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
189 enum rte_filter_op filter_op,
191 static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
192 struct rte_eth_ntuple_filter *ntuple_filter);
193 static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
194 struct rte_eth_ntuple_filter *ntuple_filter);
195 static int igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
196 struct rte_eth_ntuple_filter *filter,
198 static int igb_get_ntuple_filter(struct rte_eth_dev *dev,
199 struct rte_eth_ntuple_filter *filter);
200 static int igb_ntuple_filter_handle(struct rte_eth_dev *dev,
201 enum rte_filter_op filter_op,
203 static int igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
204 struct rte_eth_ethertype_filter *filter,
206 static int igb_ethertype_filter_handle(struct rte_eth_dev *dev,
207 enum rte_filter_op filter_op,
209 static int igb_get_ethertype_filter(struct rte_eth_dev *dev,
210 struct rte_eth_ethertype_filter *filter);
211 static int eth_igb_filter_ctrl(struct rte_eth_dev *dev,
212 enum rte_filter_type filter_type,
213 enum rte_filter_op filter_op,
215 static int eth_igb_get_reg_length(struct rte_eth_dev *dev);
216 static int eth_igb_get_regs(struct rte_eth_dev *dev,
217 struct rte_dev_reg_info *regs);
218 static int eth_igb_get_eeprom_length(struct rte_eth_dev *dev);
219 static int eth_igb_get_eeprom(struct rte_eth_dev *dev,
220 struct rte_dev_eeprom_info *eeprom);
221 static int eth_igb_set_eeprom(struct rte_eth_dev *dev,
222 struct rte_dev_eeprom_info *eeprom);
223 static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
224 struct ether_addr *mc_addr_set,
225 uint32_t nb_mc_addr);
226 static int igb_timesync_enable(struct rte_eth_dev *dev);
227 static int igb_timesync_disable(struct rte_eth_dev *dev);
228 static int igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
229 struct timespec *timestamp,
231 static int igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
232 struct timespec *timestamp);
233 static int eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev,
235 static int eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev,
237 static void eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
238 uint8_t queue, uint8_t msix_vector);
239 static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
240 uint8_t index, uint8_t offset);
241 static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);
* Define a macro to update VF stats from registers that are not cleared on read
#define UPDATE_VF_STAT(reg, last, cur) \
{ \
u32 latest = E1000_READ_REG(hw, reg); \
cur += latest - last; \
last = latest; \
}
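/* Used below as e.g. UPDATE_VF_STAT(E1000_VFGPRC, hw_stats->last_gprc,
 * hw_stats->gprc): add the delta since the previous read to the running total
 * and remember the latest raw value, since the VF counters keep counting
 * rather than clearing on read.
 */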
254 #define IGB_FC_PAUSE_TIME 0x0680
255 #define IGB_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */
256 #define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */
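/* 0x0680 is 1664 pause quanta (one quantum = 512 bit times), and the link
 * update poll below retries up to 90 times at 100 ms intervals, which gives
 * the 9 s timeout noted above.
 */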
258 #define IGBVF_PMD_NAME "rte_igbvf_pmd" /* PMD name */
260 static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;
263 * The set of PCI devices this driver supports
265 static const struct rte_pci_id pci_id_igb_map[] = {
267 #define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
268 #include "rte_pci_dev_ids.h"
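/* rte_pci_dev_ids.h invokes RTE_PCI_DEV_ID_DECL_IGB() once per supported
 * device, so the macro defined above expands each invocation into a
 * {vendor_id, device_id} entry of this table.
 */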
274 * The set of PCI devices this driver supports (for 82576&I350 VF)
276 static const struct rte_pci_id pci_id_igbvf_map[] = {
278 #define RTE_PCI_DEV_ID_DECL_IGBVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
279 #include "rte_pci_dev_ids.h"
284 static const struct eth_dev_ops eth_igb_ops = {
285 .dev_configure = eth_igb_configure,
286 .dev_start = eth_igb_start,
287 .dev_stop = eth_igb_stop,
288 .dev_close = eth_igb_close,
289 .promiscuous_enable = eth_igb_promiscuous_enable,
290 .promiscuous_disable = eth_igb_promiscuous_disable,
291 .allmulticast_enable = eth_igb_allmulticast_enable,
292 .allmulticast_disable = eth_igb_allmulticast_disable,
293 .link_update = eth_igb_link_update,
294 .stats_get = eth_igb_stats_get,
295 .stats_reset = eth_igb_stats_reset,
296 .dev_infos_get = eth_igb_infos_get,
297 .mtu_set = eth_igb_mtu_set,
298 .vlan_filter_set = eth_igb_vlan_filter_set,
299 .vlan_tpid_set = eth_igb_vlan_tpid_set,
300 .vlan_offload_set = eth_igb_vlan_offload_set,
301 .rx_queue_setup = eth_igb_rx_queue_setup,
302 .rx_queue_intr_enable = eth_igb_rx_queue_intr_enable,
303 .rx_queue_intr_disable = eth_igb_rx_queue_intr_disable,
304 .rx_queue_release = eth_igb_rx_queue_release,
305 .rx_queue_count = eth_igb_rx_queue_count,
306 .rx_descriptor_done = eth_igb_rx_descriptor_done,
307 .tx_queue_setup = eth_igb_tx_queue_setup,
308 .tx_queue_release = eth_igb_tx_queue_release,
309 .dev_led_on = eth_igb_led_on,
310 .dev_led_off = eth_igb_led_off,
311 .flow_ctrl_get = eth_igb_flow_ctrl_get,
312 .flow_ctrl_set = eth_igb_flow_ctrl_set,
313 .mac_addr_add = eth_igb_rar_set,
314 .mac_addr_remove = eth_igb_rar_clear,
315 .mac_addr_set = eth_igb_default_mac_addr_set,
316 .reta_update = eth_igb_rss_reta_update,
317 .reta_query = eth_igb_rss_reta_query,
318 .rss_hash_update = eth_igb_rss_hash_update,
319 .rss_hash_conf_get = eth_igb_rss_hash_conf_get,
320 .filter_ctrl = eth_igb_filter_ctrl,
321 .set_mc_addr_list = eth_igb_set_mc_addr_list,
322 .timesync_enable = igb_timesync_enable,
323 .timesync_disable = igb_timesync_disable,
324 .timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
325 .timesync_read_tx_timestamp = igb_timesync_read_tx_timestamp,
326 .get_reg_length = eth_igb_get_reg_length,
327 .get_reg = eth_igb_get_regs,
328 .get_eeprom_length = eth_igb_get_eeprom_length,
329 .get_eeprom = eth_igb_get_eeprom,
330 .set_eeprom = eth_igb_set_eeprom,
* dev_ops for the virtual function: only the bare necessities for basic VF
* operation are implemented
337 static const struct eth_dev_ops igbvf_eth_dev_ops = {
338 .dev_configure = igbvf_dev_configure,
339 .dev_start = igbvf_dev_start,
340 .dev_stop = igbvf_dev_stop,
341 .dev_close = igbvf_dev_close,
342 .link_update = eth_igb_link_update,
343 .stats_get = eth_igbvf_stats_get,
344 .stats_reset = eth_igbvf_stats_reset,
345 .vlan_filter_set = igbvf_vlan_filter_set,
346 .dev_infos_get = eth_igbvf_infos_get,
347 .rx_queue_setup = eth_igb_rx_queue_setup,
348 .rx_queue_release = eth_igb_rx_queue_release,
349 .tx_queue_setup = eth_igb_tx_queue_setup,
350 .tx_queue_release = eth_igb_tx_queue_release,
351 .set_mc_addr_list = eth_igb_set_mc_addr_list,
352 .mac_addr_set = igbvf_default_mac_addr_set,
353 .get_reg_length = igbvf_get_reg_length,
354 .get_reg = igbvf_get_regs,
358 * Atomically reads the link status information from global
359 * structure rte_eth_dev.
362 * - Pointer to the structure rte_eth_dev to read from.
* - Pointer to the buffer into which the link status is to be copied.
366 * - On success, zero.
367 * - On failure, negative value.
370 rte_igb_dev_atomic_read_link_status(struct rte_eth_dev *dev,
371 struct rte_eth_link *link)
373 struct rte_eth_link *dst = link;
374 struct rte_eth_link *src = &(dev->data->dev_link);
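/* The casts below rely on struct rte_eth_link fitting into 64 bits, so the
 * cmpset copies the whole link status in one atomic operation and readers
 * never observe a torn update.
 */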
376 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
377 *(uint64_t *)src) == 0)
384 * Atomically writes the link status information into global
385 * structure rte_eth_dev.
* - Pointer to the structure rte_eth_dev to write to.
* - Pointer to the buffer holding the link status to be written.
392 * - On success, zero.
393 * - On failure, negative value.
396 rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev,
397 struct rte_eth_link *link)
399 struct rte_eth_link *dst = &(dev->data->dev_link);
400 struct rte_eth_link *src = link;
402 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
403 *(uint64_t *)src) == 0)
410 igb_intr_enable(struct rte_eth_dev *dev)
412 struct e1000_interrupt *intr =
413 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
414 struct e1000_hw *hw =
415 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
417 E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
418 E1000_WRITE_FLUSH(hw);
422 igb_intr_disable(struct e1000_hw *hw)
424 E1000_WRITE_REG(hw, E1000_IMC, ~0);
425 E1000_WRITE_FLUSH(hw);
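/* Writing all ones to IMC masks every interrupt cause; the flush read pushes
 * the posted write out to the device before returning.
 */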
428 static inline int32_t
429 igb_pf_reset_hw(struct e1000_hw *hw)
434 status = e1000_reset_hw(hw);
436 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
437 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
438 ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
439 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
440 E1000_WRITE_FLUSH(hw);
446 igb_identify_hardware(struct rte_eth_dev *dev)
448 struct e1000_hw *hw =
449 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
451 hw->vendor_id = dev->pci_dev->id.vendor_id;
452 hw->device_id = dev->pci_dev->id.device_id;
453 hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
454 hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
456 e1000_set_mac_type(hw);
458 /* need to check if it is a vf device below */
462 igb_reset_swfw_lock(struct e1000_hw *hw)
467 * Do mac ops initialization manually here, since we will need
468 * some function pointers set by this call.
470 ret_val = e1000_init_mac_params(hw);
475 * SMBI lock should not fail in this early stage. If this is the case,
476 * it is due to an improper exit of the application.
477 * So force the release of the faulty lock.
479 if (e1000_get_hw_semaphore_generic(hw) < 0) {
480 PMD_DRV_LOG(DEBUG, "SMBI lock released");
482 e1000_put_hw_semaphore_generic(hw);
484 if (hw->mac.ops.acquire_swfw_sync != NULL) {
488 * Phy lock should not fail in this early stage. If this is the case,
489 * it is due to an improper exit of the application.
490 * So force the release of the faulty lock.
492 mask = E1000_SWFW_PHY0_SM << hw->bus.func;
493 if (hw->bus.func > E1000_FUNC_1)
495 if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
496 PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
499 hw->mac.ops.release_swfw_sync(hw, mask);
502 * This one is more tricky since it is common to all ports; but
503 * swfw_sync retries last long enough (1s) to be almost sure that if
* lock cannot be taken, it is due to an improper lock of the semaphore.
507 mask = E1000_SWFW_EEP_SM;
508 if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
509 PMD_DRV_LOG(DEBUG, "SWFW common locks released");
511 hw->mac.ops.release_swfw_sync(hw, mask);
514 return E1000_SUCCESS;
518 eth_igb_dev_init(struct rte_eth_dev *eth_dev)
521 struct rte_pci_device *pci_dev;
522 struct e1000_hw *hw =
523 E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
524 struct e1000_vfta * shadow_vfta =
525 E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
526 struct e1000_filter_info *filter_info =
527 E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
528 struct e1000_adapter *adapter =
529 E1000_DEV_PRIVATE(eth_dev->data->dev_private);
533 pci_dev = eth_dev->pci_dev;
eth_dev->dev_ops = &eth_igb_ops;
eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
/* For secondary processes, we don't initialise any further as the primary
 * has already done this work. Only check we don't need a different RX
 * function. */
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
542 if (eth_dev->data->scattered_rx)
eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
547 hw->hw_addr= (void *)pci_dev->mem_resource[0].addr;
549 igb_identify_hardware(eth_dev);
550 if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
555 e1000_get_bus_info(hw);
557 /* Reset any pending lock */
558 if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) {
563 /* Finish initialization */
564 if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
570 hw->phy.autoneg_wait_to_complete = 0;
571 hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
574 if (hw->phy.media_type == e1000_media_type_copper) {
575 hw->phy.mdix = 0; /* AUTO_ALL_MODES */
576 hw->phy.disable_polarity_correction = 0;
577 hw->phy.ms_type = e1000_ms_hw_default;
* Start from a known state; this is important for reading the NVM and the MAC address from it.
586 /* Make sure we have a good EEPROM before we read from it */
587 if (e1000_validate_nvm_checksum(hw) < 0) {
* Some PCI-E parts fail the first check due to
* the link being in a sleep state; call it again,
* and if it fails a second time it's a real issue.
593 if (e1000_validate_nvm_checksum(hw) < 0) {
594 PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
600 /* Read the permanent MAC address out of the EEPROM */
601 if (e1000_read_mac_addr(hw) != 0) {
602 PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
607 /* Allocate memory for storing MAC addresses */
608 eth_dev->data->mac_addrs = rte_zmalloc("e1000",
609 ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
610 if (eth_dev->data->mac_addrs == NULL) {
611 PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
612 "store MAC addresses",
613 ETHER_ADDR_LEN * hw->mac.rar_entry_count);
618 /* Copy the permanent MAC address */
ether_addr_copy((struct ether_addr *)hw->mac.addr, &eth_dev->data->mac_addrs[0]);
621 /* initialize the vfta */
622 memset(shadow_vfta, 0, sizeof(*shadow_vfta));
624 /* Now initialize the hardware */
625 if (igb_hardware_init(hw) != 0) {
626 PMD_INIT_LOG(ERR, "Hardware initialization failed");
627 rte_free(eth_dev->data->mac_addrs);
628 eth_dev->data->mac_addrs = NULL;
632 hw->mac.get_link_status = 1;
633 adapter->stopped = 0;
635 /* Indicate SOL/IDER usage */
636 if (e1000_check_reset_block(hw) < 0) {
PMD_INIT_LOG(ERR, "PHY reset is blocked due to SOL/IDER session");
641 /* initialize PF if max_vfs not zero */
642 igb_pf_host_init(eth_dev);
644 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
645 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
646 ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
647 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
648 E1000_WRITE_FLUSH(hw);
650 PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
651 eth_dev->data->port_id, pci_dev->id.vendor_id,
652 pci_dev->id.device_id);
654 /* enable support intr */
655 igb_intr_enable(eth_dev);
657 TAILQ_INIT(&filter_info->flex_list);
658 filter_info->flex_mask = 0;
659 TAILQ_INIT(&filter_info->twotuple_list);
660 filter_info->twotuple_mask = 0;
661 TAILQ_INIT(&filter_info->fivetuple_list);
662 filter_info->fivetuple_mask = 0;
667 igb_hw_control_release(hw);
673 eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
675 struct rte_pci_device *pci_dev;
677 struct e1000_adapter *adapter =
678 E1000_DEV_PRIVATE(eth_dev->data->dev_private);
680 PMD_INIT_FUNC_TRACE();
682 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
685 hw = E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
686 pci_dev = eth_dev->pci_dev;
688 if (adapter->stopped == 0)
689 eth_igb_close(eth_dev);
691 eth_dev->dev_ops = NULL;
692 eth_dev->rx_pkt_burst = NULL;
693 eth_dev->tx_pkt_burst = NULL;
695 /* Reset any pending lock */
696 igb_reset_swfw_lock(hw);
698 rte_free(eth_dev->data->mac_addrs);
699 eth_dev->data->mac_addrs = NULL;
701 /* uninitialize PF if max_vfs not zero */
702 igb_pf_host_uninit(eth_dev);
704 /* disable uio intr before callback unregister */
705 rte_intr_disable(&(pci_dev->intr_handle));
706 rte_intr_callback_unregister(&(pci_dev->intr_handle),
707 eth_igb_interrupt_handler, (void *)eth_dev);
713 * Virtual Function device init
716 eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
718 struct rte_pci_device *pci_dev;
719 struct e1000_adapter *adapter =
720 E1000_DEV_PRIVATE(eth_dev->data->dev_private);
721 struct e1000_hw *hw =
722 E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
725 PMD_INIT_FUNC_TRACE();
727 eth_dev->dev_ops = &igbvf_eth_dev_ops;
eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
/* For secondary processes, we don't initialise any further as the primary
 * has already done this work. Only check we don't need a different RX
 * function. */
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
if (eth_dev->data->scattered_rx)
eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
740 pci_dev = eth_dev->pci_dev;
742 hw->device_id = pci_dev->id.device_id;
743 hw->vendor_id = pci_dev->id.vendor_id;
744 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
745 adapter->stopped = 0;
747 /* Initialize the shared code (base driver) */
748 diag = e1000_setup_init_funcs(hw, TRUE);
750 PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d",
755 /* init_mailbox_params */
756 hw->mbx.ops.init_params(hw);
758 /* Disable the interrupts for VF */
759 igbvf_intr_disable(hw);
761 diag = hw->mac.ops.reset_hw(hw);
763 /* Allocate memory for storing MAC addresses */
764 eth_dev->data->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN *
765 hw->mac.rar_entry_count, 0);
766 if (eth_dev->data->mac_addrs == NULL) {
768 "Failed to allocate %d bytes needed to store MAC "
770 ETHER_ADDR_LEN * hw->mac.rar_entry_count);
774 /* Copy the permanent MAC address */
775 ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
&eth_dev->data->mac_addrs[0]);
PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
780 eth_dev->data->port_id, pci_dev->id.vendor_id,
781 pci_dev->id.device_id, "igb_mac_82576_vf");
787 eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
789 struct e1000_adapter *adapter =
790 E1000_DEV_PRIVATE(eth_dev->data->dev_private);
792 PMD_INIT_FUNC_TRACE();
794 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
797 if (adapter->stopped == 0)
798 igbvf_dev_close(eth_dev);
800 eth_dev->dev_ops = NULL;
801 eth_dev->rx_pkt_burst = NULL;
802 eth_dev->tx_pkt_burst = NULL;
804 rte_free(eth_dev->data->mac_addrs);
805 eth_dev->data->mac_addrs = NULL;
810 static struct eth_driver rte_igb_pmd = {
812 .name = "rte_igb_pmd",
813 .id_table = pci_id_igb_map,
814 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
815 RTE_PCI_DRV_DETACHABLE,
817 .eth_dev_init = eth_igb_dev_init,
818 .eth_dev_uninit = eth_igb_dev_uninit,
819 .dev_private_size = sizeof(struct e1000_adapter),
823 * virtual function driver struct
825 static struct eth_driver rte_igbvf_pmd = {
827 .name = "rte_igbvf_pmd",
828 .id_table = pci_id_igbvf_map,
829 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
831 .eth_dev_init = eth_igbvf_dev_init,
832 .eth_dev_uninit = eth_igbvf_dev_uninit,
833 .dev_private_size = sizeof(struct e1000_adapter),
837 rte_igb_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
839 rte_eth_driver_register(&rte_igb_pmd);
844 igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
846 struct e1000_hw *hw =
847 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* RCTL: enable the VLAN filter since VMDq always uses the VLAN filter */
849 uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
850 rctl |= E1000_RCTL_VFE;
851 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
855 * VF Driver initialization routine.
* Invoked once at EAL init time.
* Registers itself as the [Virtual Poll Mode] Driver of PCI IGB devices.
860 rte_igbvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
862 PMD_INIT_FUNC_TRACE();
864 rte_eth_driver_register(&rte_igbvf_pmd);
869 eth_igb_configure(struct rte_eth_dev *dev)
871 struct e1000_interrupt *intr =
872 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
874 PMD_INIT_FUNC_TRACE();
875 intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
876 PMD_INIT_FUNC_TRACE();
882 eth_igb_start(struct rte_eth_dev *dev)
884 struct e1000_hw *hw =
885 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
886 struct e1000_adapter *adapter =
887 E1000_DEV_PRIVATE(dev->data->dev_private);
888 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
890 uint32_t intr_vector = 0;
893 PMD_INIT_FUNC_TRACE();
895 /* Power up the phy. Needed to make the link go Up */
896 e1000_power_up_phy(hw);
899 * Packet Buffer Allocation (PBA)
* Writing PBA sets the receive portion of the buffer;
* the remainder is used for the transmit buffer.
903 if (hw->mac.type == e1000_82575) {
906 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
907 E1000_WRITE_REG(hw, E1000_PBA, pba);
910 /* Put the address into the Receive Address Array */
911 e1000_rar_set(hw, hw->mac.addr, 0);
913 /* Initialize the hardware */
914 if (igb_hardware_init(hw)) {
915 PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
918 adapter->stopped = 0;
920 E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
922 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
923 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
924 ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
925 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
926 E1000_WRITE_FLUSH(hw);
928 /* configure PF module if SRIOV enabled */
929 igb_pf_host_configure(dev);
931 /* check and configure queue intr-vector mapping */
932 if (dev->data->dev_conf.intr_conf.rxq != 0)
933 intr_vector = dev->data->nb_rx_queues;
935 if (rte_intr_efd_enable(intr_handle, intr_vector))
938 if (rte_intr_dp_is_en(intr_handle)) {
939 intr_handle->intr_vec =
940 rte_zmalloc("intr_vec",
941 dev->data->nb_rx_queues * sizeof(int), 0);
942 if (intr_handle->intr_vec == NULL) {
943 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
944 " intr_vec\n", dev->data->nb_rx_queues);
/* configure MSI-X for Rx interrupt */
950 eth_igb_configure_msix_intr(dev);
952 /* Configure for OS presence */
953 igb_init_manageability(hw);
955 eth_igb_tx_init(dev);
957 /* This can fail when allocating mbufs for descriptor rings */
958 ret = eth_igb_rx_init(dev);
960 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
961 igb_dev_clear_queues(dev);
965 e1000_clear_hw_cntrs_base_generic(hw);
968 * VLAN Offload Settings
mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
971 ETH_VLAN_EXTEND_MASK;
972 eth_igb_vlan_offload_set(dev, mask);
974 if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
/* Enable the VLAN filter since VMDq always uses the VLAN filter */
976 igb_vmdq_vlan_hw_filter_enable(dev);
979 if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
980 (hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210) ||
981 (hw->mac.type == e1000_i211)) {
982 /* Configure EITR with the maximum possible value (0xFFFF) */
983 E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
986 /* Setup link speed and duplex */
987 switch (dev->data->dev_conf.link_speed) {
988 case ETH_LINK_SPEED_AUTONEG:
989 if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
990 hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
991 else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
992 hw->phy.autoneg_advertised = E1000_ALL_HALF_DUPLEX;
993 else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
994 hw->phy.autoneg_advertised = E1000_ALL_FULL_DUPLEX;
996 goto error_invalid_config;
998 case ETH_LINK_SPEED_10:
999 if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
1000 hw->phy.autoneg_advertised = E1000_ALL_10_SPEED;
1001 else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
1002 hw->phy.autoneg_advertised = ADVERTISE_10_HALF;
1003 else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
1004 hw->phy.autoneg_advertised = ADVERTISE_10_FULL;
1006 goto error_invalid_config;
1008 case ETH_LINK_SPEED_100:
1009 if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
1010 hw->phy.autoneg_advertised = E1000_ALL_100_SPEED;
1011 else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
1012 hw->phy.autoneg_advertised = ADVERTISE_100_HALF;
1013 else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
1014 hw->phy.autoneg_advertised = ADVERTISE_100_FULL;
1016 goto error_invalid_config;
1018 case ETH_LINK_SPEED_1000:
1019 if ((dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX) ||
1020 (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX))
1021 hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
1023 goto error_invalid_config;
1025 case ETH_LINK_SPEED_10000:
1027 goto error_invalid_config;
1029 e1000_setup_link(hw);
1031 /* check if lsc interrupt feature is enabled */
1032 if (dev->data->dev_conf.intr_conf.lsc != 0) {
1033 if (rte_intr_allow_others(intr_handle)) {
1034 rte_intr_callback_register(intr_handle,
1035 eth_igb_interrupt_handler,
1037 eth_igb_lsc_interrupt_setup(dev);
1039 PMD_INIT_LOG(INFO, "lsc won't enable because of"
1040 " no intr multiplex\n");
1043 /* check if rxq interrupt is enabled */
1044 if (dev->data->dev_conf.intr_conf.rxq != 0)
1045 eth_igb_rxq_interrupt_setup(dev);
1047 /* enable uio/vfio intr/eventfd mapping */
1048 rte_intr_enable(intr_handle);
1050 /* resume enabled intr since hw reset */
1051 igb_intr_enable(dev);
1053 PMD_INIT_LOG(DEBUG, "<<");
1057 error_invalid_config:
1058 PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u",
1059 dev->data->dev_conf.link_speed,
1060 dev->data->dev_conf.link_duplex, dev->data->port_id);
1061 igb_dev_clear_queues(dev);
1065 /*********************************************************************
1067 * This routine disables all traffic on the adapter by issuing a
1068 * global reset on the MAC.
1070 **********************************************************************/
1072 eth_igb_stop(struct rte_eth_dev *dev)
1074 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1075 struct e1000_filter_info *filter_info =
1076 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1077 struct rte_eth_link link;
1078 struct e1000_flex_filter *p_flex;
1079 struct e1000_5tuple_filter *p_5tuple, *p_5tuple_next;
1080 struct e1000_2tuple_filter *p_2tuple, *p_2tuple_next;
1081 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1083 igb_intr_disable(hw);
1085 /* disable intr eventfd mapping */
1086 rte_intr_disable(intr_handle);
1088 igb_pf_reset_hw(hw);
1089 E1000_WRITE_REG(hw, E1000_WUC, 0);
1091 /* Set bit for Go Link disconnect */
1092 if (hw->mac.type >= e1000_82580) {
1095 phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
1096 phpm_reg |= E1000_82580_PM_GO_LINKD;
1097 E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
1100 /* Power down the phy. Needed to make the link go Down */
1101 if (hw->phy.media_type == e1000_media_type_copper)
1102 e1000_power_down_phy(hw);
1104 e1000_shutdown_fiber_serdes_link(hw);
1106 igb_dev_clear_queues(dev);
1108 /* clear the recorded link status */
1109 memset(&link, 0, sizeof(link));
1110 rte_igb_dev_atomic_write_link_status(dev, &link);
1112 /* Remove all flex filters of the device */
1113 while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) {
1114 TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries);
1117 filter_info->flex_mask = 0;
1119 /* Remove all ntuple filters of the device */
1120 for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
1121 p_5tuple != NULL; p_5tuple = p_5tuple_next) {
1122 p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
1123 TAILQ_REMOVE(&filter_info->fivetuple_list,
1127 filter_info->fivetuple_mask = 0;
1128 for (p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list);
1129 p_2tuple != NULL; p_2tuple = p_2tuple_next) {
1130 p_2tuple_next = TAILQ_NEXT(p_2tuple, entries);
1131 TAILQ_REMOVE(&filter_info->twotuple_list,
1135 filter_info->twotuple_mask = 0;
1137 /* Clean datapath event and queue/vec mapping */
1138 rte_intr_efd_disable(intr_handle);
1139 if (intr_handle->intr_vec != NULL) {
1140 rte_free(intr_handle->intr_vec);
1141 intr_handle->intr_vec = NULL;
1146 eth_igb_close(struct rte_eth_dev *dev)
1148 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1149 struct e1000_adapter *adapter =
1150 E1000_DEV_PRIVATE(dev->data->dev_private);
1151 struct rte_eth_link link;
1152 struct rte_pci_device *pci_dev;
1155 adapter->stopped = 1;
1157 e1000_phy_hw_reset(hw);
1158 igb_release_manageability(hw);
1159 igb_hw_control_release(hw);
1161 /* Clear bit for Go Link disconnect */
1162 if (hw->mac.type >= e1000_82580) {
1165 phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
1166 phpm_reg &= ~E1000_82580_PM_GO_LINKD;
1167 E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
1170 igb_dev_free_queues(dev);
1172 pci_dev = dev->pci_dev;
1173 if (pci_dev->intr_handle.intr_vec) {
1174 rte_free(pci_dev->intr_handle.intr_vec);
1175 pci_dev->intr_handle.intr_vec = NULL;
1178 memset(&link, 0, sizeof(link));
1179 rte_igb_dev_atomic_write_link_status(dev, &link);
1183 igb_get_rx_buffer_size(struct e1000_hw *hw)
1185 uint32_t rx_buf_size;
1186 if (hw->mac.type == e1000_82576) {
1187 rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10;
1188 } else if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i350) {
1189 /* PBS needs to be translated according to a lookup table */
1190 rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf);
1191 rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size);
1192 rx_buf_size = (rx_buf_size << 10);
1193 } else if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
1194 rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10;
1196 rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10;
1202 /*********************************************************************
1204 * Initialize the hardware
1206 **********************************************************************/
1208 igb_hardware_init(struct e1000_hw *hw)
1210 uint32_t rx_buf_size;
1213 /* Let the firmware know the OS is in control */
1214 igb_hw_control_acquire(hw);
1217 * These parameters control the automatic generation (Tx) and
1218 * response (Rx) to Ethernet PAUSE frames.
1219 * - High water mark should allow for at least two standard size (1518)
1220 * frames to be received after sending an XOFF.
1221 * - Low water mark works best when it is very near the high water mark.
1222 * This allows the receiver to restart by sending XON when it has
1223 * drained a bit. Here we use an arbitrary value of 1500 which will
1224 * restart after one full frame is pulled from the buffer. There
1225 * could be several smaller frames in the buffer and if so they will
* not trigger the XON until their total number reduces the buffer by 1500.
1228 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
1230 rx_buf_size = igb_get_rx_buffer_size(hw);
1232 hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2);
1233 hw->fc.low_water = hw->fc.high_water - 1500;
1234 hw->fc.pause_time = IGB_FC_PAUSE_TIME;
1235 hw->fc.send_xon = 1;
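/* Worked example, assuming a hypothetical 64 KB Rx packet buffer:
 * high_water = 65536 - 2 * 1518 = 62500 bytes, low_water = 61000 bytes, so
 * XOFF is sent while two full-size frames still fit and XON resumes traffic
 * once roughly one frame has drained.
 */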
1237 /* Set Flow control, use the tunable location if sane */
1238 if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4))
1239 hw->fc.requested_mode = igb_fc_setting;
1241 hw->fc.requested_mode = e1000_fc_none;
1243 /* Issue a global reset */
1244 igb_pf_reset_hw(hw);
1245 E1000_WRITE_REG(hw, E1000_WUC, 0);
1247 diag = e1000_init_hw(hw);
1251 E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
1252 e1000_get_phy_info(hw);
1253 e1000_check_for_link(hw);
1258 /* This function is based on igb_update_stats_counters() in igb/if_igb.c */
1260 eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
1262 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1263 struct e1000_hw_stats *stats =
1264 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
if (hw->phy.media_type == e1000_media_type_copper ||
(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
stats->symerrs += E1000_READ_REG(hw, E1000_SYMERRS);
1271 stats->sec += E1000_READ_REG(hw, E1000_SEC);
1274 stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
1275 stats->mpc += E1000_READ_REG(hw, E1000_MPC);
1276 stats->scc += E1000_READ_REG(hw, E1000_SCC);
1277 stats->ecol += E1000_READ_REG(hw, E1000_ECOL);
1279 stats->mcc += E1000_READ_REG(hw, E1000_MCC);
1280 stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
1281 stats->colc += E1000_READ_REG(hw, E1000_COLC);
1282 stats->dc += E1000_READ_REG(hw, E1000_DC);
1283 stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
1284 stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
1285 stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
1287 ** For watchdog management we need to know if we have been
1288 ** paused during the last interval, so capture that here.
1290 pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
1291 stats->xoffrxc += pause_frames;
1292 stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
1293 stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
1294 stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
1295 stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
1296 stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
1297 stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
1298 stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
1299 stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
1300 stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
1301 stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
1302 stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
1303 stats->gptc += E1000_READ_REG(hw, E1000_GPTC);
1305 /* For the 64-bit byte counters the low dword must be read first. */
1306 /* Both registers clear on the read of the high dword */
1308 stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
1309 stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
1310 stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
1311 stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);
1313 stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
1314 stats->ruc += E1000_READ_REG(hw, E1000_RUC);
1315 stats->rfc += E1000_READ_REG(hw, E1000_RFC);
1316 stats->roc += E1000_READ_REG(hw, E1000_ROC);
1317 stats->rjc += E1000_READ_REG(hw, E1000_RJC);
1319 stats->tor += E1000_READ_REG(hw, E1000_TORH);
1320 stats->tot += E1000_READ_REG(hw, E1000_TOTH);
1322 stats->tpr += E1000_READ_REG(hw, E1000_TPR);
1323 stats->tpt += E1000_READ_REG(hw, E1000_TPT);
1324 stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
1325 stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
1326 stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
1327 stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
1328 stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
1329 stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
1330 stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
1331 stats->bptc += E1000_READ_REG(hw, E1000_BPTC);
1333 /* Interrupt Counts */
1335 stats->iac += E1000_READ_REG(hw, E1000_IAC);
1336 stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
1337 stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
1338 stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
1339 stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
1340 stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
1341 stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
1342 stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
1343 stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
1345 /* Host to Card Statistics */
1347 stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
1348 stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
1349 stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
1350 stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
1351 stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
1352 stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
1353 stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
1354 stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL);
1355 stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32);
1356 stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL);
1357 stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32);
1358 stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
1359 stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
1360 stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);
1362 stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
1363 stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
1364 stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
1365 stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
1366 stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
1367 stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
1369 if (rte_stats == NULL)
1373 rte_stats->ibadcrc = stats->crcerrs;
1374 rte_stats->ibadlen = stats->rlec + stats->ruc + stats->roc;
1375 rte_stats->imissed = stats->mpc;
1376 rte_stats->ierrors = rte_stats->ibadcrc +
1377 rte_stats->ibadlen +
1378 rte_stats->imissed +
1379 stats->rxerrc + stats->algnerrc + stats->cexterr;
1382 rte_stats->oerrors = stats->ecol + stats->latecol;
1384 /* XON/XOFF pause frames */
1385 rte_stats->tx_pause_xon = stats->xontxc;
1386 rte_stats->rx_pause_xon = stats->xonrxc;
1387 rte_stats->tx_pause_xoff = stats->xofftxc;
1388 rte_stats->rx_pause_xoff = stats->xoffrxc;
1390 rte_stats->ipackets = stats->gprc;
1391 rte_stats->opackets = stats->gptc;
1392 rte_stats->ibytes = stats->gorc;
1393 rte_stats->obytes = stats->gotc;
1397 eth_igb_stats_reset(struct rte_eth_dev *dev)
1399 struct e1000_hw_stats *hw_stats =
1400 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1402 /* HW registers are cleared on read */
1403 eth_igb_stats_get(dev, NULL);
1405 /* Reset software totals */
1406 memset(hw_stats, 0, sizeof(*hw_stats));
1410 eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
1412 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1413 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*)
1414 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1416 /* Good Rx packets, include VF loopback */
1417 UPDATE_VF_STAT(E1000_VFGPRC,
1418 hw_stats->last_gprc, hw_stats->gprc);
1420 /* Good Rx octets, include VF loopback */
1421 UPDATE_VF_STAT(E1000_VFGORC,
1422 hw_stats->last_gorc, hw_stats->gorc);
1424 /* Good Tx packets, include VF loopback */
1425 UPDATE_VF_STAT(E1000_VFGPTC,
1426 hw_stats->last_gptc, hw_stats->gptc);
1428 /* Good Tx octets, include VF loopback */
1429 UPDATE_VF_STAT(E1000_VFGOTC,
1430 hw_stats->last_gotc, hw_stats->gotc);
/* Rx Multicast packets */
1433 UPDATE_VF_STAT(E1000_VFMPRC,
1434 hw_stats->last_mprc, hw_stats->mprc);
1436 /* Good Rx loopback packets */
1437 UPDATE_VF_STAT(E1000_VFGPRLBC,
1438 hw_stats->last_gprlbc, hw_stats->gprlbc);
1440 /* Good Rx loopback octets */
1441 UPDATE_VF_STAT(E1000_VFGORLBC,
1442 hw_stats->last_gorlbc, hw_stats->gorlbc);
1444 /* Good Tx loopback packets */
1445 UPDATE_VF_STAT(E1000_VFGPTLBC,
1446 hw_stats->last_gptlbc, hw_stats->gptlbc);
1448 /* Good Tx loopback octets */
1449 UPDATE_VF_STAT(E1000_VFGOTLBC,
1450 hw_stats->last_gotlbc, hw_stats->gotlbc);
1452 if (rte_stats == NULL)
1455 rte_stats->ipackets = hw_stats->gprc;
1456 rte_stats->ibytes = hw_stats->gorc;
1457 rte_stats->opackets = hw_stats->gptc;
1458 rte_stats->obytes = hw_stats->gotc;
1459 rte_stats->imcasts = hw_stats->mprc;
1460 rte_stats->ilbpackets = hw_stats->gprlbc;
1461 rte_stats->ilbbytes = hw_stats->gorlbc;
1462 rte_stats->olbpackets = hw_stats->gptlbc;
1463 rte_stats->olbbytes = hw_stats->gotlbc;
1468 eth_igbvf_stats_reset(struct rte_eth_dev *dev)
1470 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*)
1471 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1473 /* Sync HW register to the last stats */
1474 eth_igbvf_stats_get(dev, NULL);
1476 /* reset HW current stats*/
1477 memset(&hw_stats->gprc, 0, sizeof(*hw_stats) -
1478 offsetof(struct e1000_vf_stats, gprc));
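/* Only the members from 'gprc' onwards are zeroed; the fields laid out before
 * it (presumably the last-read register snapshots) are kept so that the
 * UPDATE_VF_STAT deltas stay correct after the reset.
 */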
1483 eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1485 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1487 dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
1488 dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
1489 dev_info->max_mac_addrs = hw->mac.rar_entry_count;
1490 dev_info->rx_offload_capa =
1491 DEV_RX_OFFLOAD_VLAN_STRIP |
1492 DEV_RX_OFFLOAD_IPV4_CKSUM |
1493 DEV_RX_OFFLOAD_UDP_CKSUM |
1494 DEV_RX_OFFLOAD_TCP_CKSUM;
1495 dev_info->tx_offload_capa =
1496 DEV_TX_OFFLOAD_VLAN_INSERT |
1497 DEV_TX_OFFLOAD_IPV4_CKSUM |
1498 DEV_TX_OFFLOAD_UDP_CKSUM |
1499 DEV_TX_OFFLOAD_TCP_CKSUM |
1500 DEV_TX_OFFLOAD_SCTP_CKSUM;
1502 switch (hw->mac.type) {
1504 dev_info->max_rx_queues = 4;
1505 dev_info->max_tx_queues = 4;
1506 dev_info->max_vmdq_pools = 0;
1510 dev_info->max_rx_queues = 16;
1511 dev_info->max_tx_queues = 16;
1512 dev_info->max_vmdq_pools = ETH_8_POOLS;
1513 dev_info->vmdq_queue_num = 16;
1517 dev_info->max_rx_queues = 8;
1518 dev_info->max_tx_queues = 8;
1519 dev_info->max_vmdq_pools = ETH_8_POOLS;
1520 dev_info->vmdq_queue_num = 8;
1524 dev_info->max_rx_queues = 8;
1525 dev_info->max_tx_queues = 8;
1526 dev_info->max_vmdq_pools = ETH_8_POOLS;
1527 dev_info->vmdq_queue_num = 8;
1531 dev_info->max_rx_queues = 8;
1532 dev_info->max_tx_queues = 8;
1536 dev_info->max_rx_queues = 4;
1537 dev_info->max_tx_queues = 4;
1538 dev_info->max_vmdq_pools = 0;
1542 dev_info->max_rx_queues = 2;
1543 dev_info->max_tx_queues = 2;
1544 dev_info->max_vmdq_pools = 0;
1548 /* Should not happen */
1551 dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t);
1552 dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
1553 dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL;
1555 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1557 .pthresh = IGB_DEFAULT_RX_PTHRESH,
1558 .hthresh = IGB_DEFAULT_RX_HTHRESH,
1559 .wthresh = IGB_DEFAULT_RX_WTHRESH,
1561 .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
1565 dev_info->default_txconf = (struct rte_eth_txconf) {
1567 .pthresh = IGB_DEFAULT_TX_PTHRESH,
1568 .hthresh = IGB_DEFAULT_TX_HTHRESH,
1569 .wthresh = IGB_DEFAULT_TX_WTHRESH,
1576 eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1578 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1580 dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
1581 dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
1582 dev_info->max_mac_addrs = hw->mac.rar_entry_count;
1583 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
1584 DEV_RX_OFFLOAD_IPV4_CKSUM |
1585 DEV_RX_OFFLOAD_UDP_CKSUM |
1586 DEV_RX_OFFLOAD_TCP_CKSUM;
1587 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
1588 DEV_TX_OFFLOAD_IPV4_CKSUM |
1589 DEV_TX_OFFLOAD_UDP_CKSUM |
1590 DEV_TX_OFFLOAD_TCP_CKSUM |
1591 DEV_TX_OFFLOAD_SCTP_CKSUM;
1592 switch (hw->mac.type) {
1594 dev_info->max_rx_queues = 2;
1595 dev_info->max_tx_queues = 2;
1597 case e1000_vfadapt_i350:
1598 dev_info->max_rx_queues = 1;
1599 dev_info->max_tx_queues = 1;
1602 /* Should not happen */
1606 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1608 .pthresh = IGB_DEFAULT_RX_PTHRESH,
1609 .hthresh = IGB_DEFAULT_RX_HTHRESH,
1610 .wthresh = IGB_DEFAULT_RX_WTHRESH,
1612 .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
1616 dev_info->default_txconf = (struct rte_eth_txconf) {
1618 .pthresh = IGB_DEFAULT_TX_PTHRESH,
1619 .hthresh = IGB_DEFAULT_TX_HTHRESH,
1620 .wthresh = IGB_DEFAULT_TX_WTHRESH,
1626 /* return 0 means link status changed, -1 means not changed */
1628 eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1630 struct e1000_hw *hw =
1631 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1632 struct rte_eth_link link, old;
1633 int link_check, count;
1636 hw->mac.get_link_status = 1;
1638 /* possible wait-to-complete in up to 9 seconds */
1639 for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count ++) {
1640 /* Read the real link status */
1641 switch (hw->phy.media_type) {
1642 case e1000_media_type_copper:
1643 /* Do the work to read phy */
1644 e1000_check_for_link(hw);
1645 link_check = !hw->mac.get_link_status;
1648 case e1000_media_type_fiber:
1649 e1000_check_for_link(hw);
1650 link_check = (E1000_READ_REG(hw, E1000_STATUS) &
1654 case e1000_media_type_internal_serdes:
1655 e1000_check_for_link(hw);
1656 link_check = hw->mac.serdes_has_link;
1659 /* VF device is type_unknown */
1660 case e1000_media_type_unknown:
1661 eth_igbvf_link_update(hw);
1662 link_check = !hw->mac.get_link_status;
1668 if (link_check || wait_to_complete == 0)
1670 rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL);
1672 memset(&link, 0, sizeof(link));
1673 rte_igb_dev_atomic_read_link_status(dev, &link);
1676 /* Now we check if a transition has happened */
1678 hw->mac.ops.get_link_up_info(hw, &link.link_speed,
1680 link.link_status = 1;
1681 } else if (!link_check) {
1682 link.link_speed = 0;
1683 link.link_duplex = 0;
1684 link.link_status = 0;
1686 rte_igb_dev_atomic_write_link_status(dev, &link);
1689 if (old.link_status == link.link_status)
1697 * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
1698 * For ASF and Pass Through versions of f/w this means
1699 * that the driver is loaded.
1702 igb_hw_control_acquire(struct e1000_hw *hw)
1706 /* Let firmware know the driver has taken over */
1707 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1708 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1712 * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
1713 * For ASF and Pass Through versions of f/w this means that the
1714 * driver is no longer loaded.
1717 igb_hw_control_release(struct e1000_hw *hw)
/* Let firmware take over control of h/w */
1722 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1723 E1000_WRITE_REG(hw, E1000_CTRL_EXT,
1724 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1728 * Bit of a misnomer, what this really means is
1729 * to enable OS management of the system... aka
1730 * to disable special hardware management features.
1733 igb_init_manageability(struct e1000_hw *hw)
1735 if (e1000_enable_mng_pass_thru(hw)) {
1736 uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
1737 uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
1739 /* disable hardware interception of ARP */
1740 manc &= ~(E1000_MANC_ARP_EN);
1742 /* enable receiving management packets to the host */
1743 manc |= E1000_MANC_EN_MNG2HOST;
1744 manc2h |= 1 << 5; /* Mng Port 623 */
1745 manc2h |= 1 << 6; /* Mng Port 664 */
1746 E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
1747 E1000_WRITE_REG(hw, E1000_MANC, manc);
1752 igb_release_manageability(struct e1000_hw *hw)
1754 if (e1000_enable_mng_pass_thru(hw)) {
1755 uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
1757 manc |= E1000_MANC_ARP_EN;
1758 manc &= ~E1000_MANC_EN_MNG2HOST;
1760 E1000_WRITE_REG(hw, E1000_MANC, manc);
1765 eth_igb_promiscuous_enable(struct rte_eth_dev *dev)
1767 struct e1000_hw *hw =
1768 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1771 rctl = E1000_READ_REG(hw, E1000_RCTL);
1772 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1773 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1777 eth_igb_promiscuous_disable(struct rte_eth_dev *dev)
1779 struct e1000_hw *hw =
1780 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1783 rctl = E1000_READ_REG(hw, E1000_RCTL);
1784 rctl &= (~E1000_RCTL_UPE);
1785 if (dev->data->all_multicast == 1)
1786 rctl |= E1000_RCTL_MPE;
1788 rctl &= (~E1000_RCTL_MPE);
1789 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1793 eth_igb_allmulticast_enable(struct rte_eth_dev *dev)
1795 struct e1000_hw *hw =
1796 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1799 rctl = E1000_READ_REG(hw, E1000_RCTL);
1800 rctl |= E1000_RCTL_MPE;
1801 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1805 eth_igb_allmulticast_disable(struct rte_eth_dev *dev)
1807 struct e1000_hw *hw =
1808 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1811 if (dev->data->promiscuous == 1)
1812 return; /* must remain in all_multicast mode */
1813 rctl = E1000_READ_REG(hw, E1000_RCTL);
1814 rctl &= (~E1000_RCTL_MPE);
1815 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1819 eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1821 struct e1000_hw *hw =
1822 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1823 struct e1000_vfta * shadow_vfta =
1824 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1829 vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
1830 E1000_VFTA_ENTRY_MASK);
1831 vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
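/* The VFTA is the usual 4096-entry VLAN table split into 32-bit words:
 * vid_idx selects the word and vid_bit the bit within it, e.g. VLAN 100 maps
 * to word 3, bit 4.
 */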
1832 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
1837 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);
1839 /* update local VFTA copy */
1840 shadow_vfta->vfta[vid_idx] = vfta;
1846 eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid)
1848 struct e1000_hw *hw =
1849 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t reg = ETHER_TYPE_VLAN;
1852 reg |= (tpid << 16);
1853 E1000_WRITE_REG(hw, E1000_VET, reg);
1857 igb_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1859 struct e1000_hw *hw =
1860 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1863 /* Filter Table Disable */
1864 reg = E1000_READ_REG(hw, E1000_RCTL);
1865 reg &= ~E1000_RCTL_CFIEN;
1866 reg &= ~E1000_RCTL_VFE;
1867 E1000_WRITE_REG(hw, E1000_RCTL, reg);
1871 igb_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1873 struct e1000_hw *hw =
1874 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1875 struct e1000_vfta * shadow_vfta =
1876 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1880 /* Filter Table Enable, CFI not used for packet acceptance */
1881 reg = E1000_READ_REG(hw, E1000_RCTL);
1882 reg &= ~E1000_RCTL_CFIEN;
1883 reg |= E1000_RCTL_VFE;
1884 E1000_WRITE_REG(hw, E1000_RCTL, reg);
1886 /* restore VFTA table */
1887 for (i = 0; i < IGB_VFTA_SIZE; i++)
1888 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
1892 igb_vlan_hw_strip_disable(struct rte_eth_dev *dev)
1894 struct e1000_hw *hw =
1895 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1898 /* VLAN Mode Disable */
1899 reg = E1000_READ_REG(hw, E1000_CTRL);
1900 reg &= ~E1000_CTRL_VME;
1901 E1000_WRITE_REG(hw, E1000_CTRL, reg);
1905 igb_vlan_hw_strip_enable(struct rte_eth_dev *dev)
1907 struct e1000_hw *hw =
1908 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1911 /* VLAN Mode Enable */
1912 reg = E1000_READ_REG(hw, E1000_CTRL);
1913 reg |= E1000_CTRL_VME;
1914 E1000_WRITE_REG(hw, E1000_CTRL, reg);
1918 igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
1920 struct e1000_hw *hw =
1921 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1924 /* CTRL_EXT: Extended VLAN */
1925 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1926 reg &= ~E1000_CTRL_EXT_EXTEND_VLAN;
1927 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
1929 /* Update maximum packet length */
1930 if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
1931 E1000_WRITE_REG(hw, E1000_RLPML,
1932 dev->data->dev_conf.rxmode.max_rx_pkt_len +
1937 igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
1939 struct e1000_hw *hw =
1940 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1943 /* CTRL_EXT: Extended VLAN */
1944 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1945 reg |= E1000_CTRL_EXT_EXTEND_VLAN;
1946 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
1948 /* Update maximum packet length */
1949 if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
1950 E1000_WRITE_REG(hw, E1000_RLPML,
1951 dev->data->dev_conf.rxmode.max_rx_pkt_len +
1956 eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1958 if(mask & ETH_VLAN_STRIP_MASK){
1959 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1960 igb_vlan_hw_strip_enable(dev);
1962 igb_vlan_hw_strip_disable(dev);
1965 if(mask & ETH_VLAN_FILTER_MASK){
1966 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1967 igb_vlan_hw_filter_enable(dev);
1969 igb_vlan_hw_filter_disable(dev);
1972 if(mask & ETH_VLAN_EXTEND_MASK){
1973 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1974 igb_vlan_hw_extend_enable(dev);
1976 igb_vlan_hw_extend_disable(dev);
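/*
 * Illustrative sketch, assuming the standard rte_ethdev API of this DPDK
 * release (rte_eth_dev_set_vlan_offload / rte_eth_dev_vlan_filter): how an
 * application typically reaches the helpers dispatched above.
 *
 *	enable HW VLAN stripping and filtering on the port:
 *	rte_eth_dev_set_vlan_offload(port_id,
 *			ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD);
 *
 *	admit VLAN 100 once filtering is on:
 *	rte_eth_dev_vlan_filter(port_id, 100, 1);
 *
 * The ethdev layer turns the offload request into the mask bits handled by
 * eth_igb_vlan_offload_set() and the per-VLAN call into
 * eth_igb_vlan_filter_set().
 */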
1982 * It enables the interrupt mask and then enables the interrupt.
1985 * Pointer to struct rte_eth_dev.
1988 * - On success, zero.
1989 * - On failure, a negative value.
1992 eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
1994 struct e1000_interrupt *intr =
1995 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1997 intr->mask |= E1000_ICR_LSC;
2002 /* It clears the interrupt causes and enables the interrupt.
2003 * It will be called only once during NIC initialization.
2006 * Pointer to struct rte_eth_dev.
2009 * - On success, zero.
2010 * - On failure, a negative value.
2012 static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev)
2014 uint32_t mask, regval;
2015 struct e1000_hw *hw =
2016 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2017 struct rte_eth_dev_info dev_info;
2019 memset(&dev_info, 0, sizeof(dev_info));
2020 eth_igb_infos_get(dev, &dev_info);
2022 mask = 0xFFFFFFFF >> (32 - dev_info.max_rx_queues);
2023 regval = E1000_READ_REG(hw, E1000_EIMS);
2024 E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
2030 * It reads the ICR register to get the interrupt causes, checks them and
2031 * sets a bit flag to request a link status update.
2034 * Pointer to struct rte_eth_dev.
2037 * - On success, zero.
2038 * - On failure, a negative value.
2041 eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
2044 struct e1000_hw *hw =
2045 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2046 struct e1000_interrupt *intr =
2047 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2049 igb_intr_disable(hw);
2051 /* read-on-clear nic registers here */
2052 icr = E1000_READ_REG(hw, E1000_ICR);
2055 if (icr & E1000_ICR_LSC) {
2056 intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
2059 if (icr & E1000_ICR_VMMB)
2060 intr->flags |= E1000_FLAG_MAILBOX;
2066 * It executes link_update once it knows an interrupt is present.
2069 * Pointer to struct rte_eth_dev.
2072 * - On success, zero.
2073 * - On failure, a negative value.
2076 eth_igb_interrupt_action(struct rte_eth_dev *dev)
2078 struct e1000_hw *hw =
2079 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2080 struct e1000_interrupt *intr =
2081 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2082 uint32_t tctl, rctl;
2083 struct rte_eth_link link;
2086 if (intr->flags & E1000_FLAG_MAILBOX) {
2087 igb_pf_mbx_process(dev);
2088 intr->flags &= ~E1000_FLAG_MAILBOX;
2091 igb_intr_enable(dev);
2092 rte_intr_enable(&(dev->pci_dev->intr_handle));
2094 if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
2095 intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
2097 /* set get_link_status to check register later */
2098 hw->mac.get_link_status = 1;
2099 ret = eth_igb_link_update(dev, 0);
2101 /* check if link has changed */
2105 memset(&link, 0, sizeof(link));
2106 rte_igb_dev_atomic_read_link_status(dev, &link);
2107 if (link.link_status) {
2109 " Port %d: Link Up - speed %u Mbps - %s",
2111 (unsigned)link.link_speed,
2112 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
2113 "full-duplex" : "half-duplex");
2115 PMD_INIT_LOG(INFO, " Port %d: Link Down",
2116 dev->data->port_id);
2119 PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
2120 dev->pci_dev->addr.domain,
2121 dev->pci_dev->addr.bus,
2122 dev->pci_dev->addr.devid,
2123 dev->pci_dev->addr.function);
2124 tctl = E1000_READ_REG(hw, E1000_TCTL);
2125 rctl = E1000_READ_REG(hw, E1000_RCTL);
2126 if (link.link_status) {
2128 tctl |= E1000_TCTL_EN;
2129 rctl |= E1000_RCTL_EN;
2132 tctl &= ~E1000_TCTL_EN;
2133 rctl &= ~E1000_RCTL_EN;
2135 E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2136 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2137 E1000_WRITE_FLUSH(hw);
2138 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
2145 * Interrupt handler, which shall be registered first.
2148 * Pointer to interrupt handle.
2150 * The address of parameter (struct rte_eth_dev *) registered before.
2156 eth_igb_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
2159 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2161 eth_igb_interrupt_get_status(dev);
2162 eth_igb_interrupt_action(dev);
2166 eth_igb_led_on(struct rte_eth_dev *dev)
2168 struct e1000_hw *hw;
2170 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2171 return (e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
2175 eth_igb_led_off(struct rte_eth_dev *dev)
2177 struct e1000_hw *hw;
2179 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2180 return (e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
2184 eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2186 struct e1000_hw *hw;
2191 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2192 fc_conf->pause_time = hw->fc.pause_time;
2193 fc_conf->high_water = hw->fc.high_water;
2194 fc_conf->low_water = hw->fc.low_water;
2195 fc_conf->send_xon = hw->fc.send_xon;
2196 fc_conf->autoneg = hw->mac.autoneg;
2199 * Return rx_pause and tx_pause status according to actual setting of
2200 * the TFCE and RFCE bits in the CTRL register.
2202 ctrl = E1000_READ_REG(hw, E1000_CTRL);
2203 if (ctrl & E1000_CTRL_TFCE)
2208 if (ctrl & E1000_CTRL_RFCE)
2213 if (rx_pause && tx_pause)
2214 fc_conf->mode = RTE_FC_FULL;
2216 fc_conf->mode = RTE_FC_RX_PAUSE;
2218 fc_conf->mode = RTE_FC_TX_PAUSE;
2220 fc_conf->mode = RTE_FC_NONE;
2226 eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2228 struct e1000_hw *hw;
2230 enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
2236 uint32_t rx_buf_size;
2237 uint32_t max_high_water;
2240 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2241 if (fc_conf->autoneg != hw->mac.autoneg)
2243 rx_buf_size = igb_get_rx_buffer_size(hw);
2244 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2246 /* Reserve at least one Ethernet frame for the watermark */
2247 max_high_water = rx_buf_size - ETHER_MAX_LEN;
2248 if ((fc_conf->high_water > max_high_water) ||
2249 (fc_conf->high_water < fc_conf->low_water)) {
2250 PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
2251 PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water);
2255 hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
2256 hw->fc.pause_time = fc_conf->pause_time;
2257 hw->fc.high_water = fc_conf->high_water;
2258 hw->fc.low_water = fc_conf->low_water;
2259 hw->fc.send_xon = fc_conf->send_xon;
2261 err = e1000_setup_link_generic(hw);
2262 if (err == E1000_SUCCESS) {
2264 /* check if we want to forward MAC frames - driver doesn't have native
2265 * capability to do that, so we'll write the registers ourselves */
2267 rctl = E1000_READ_REG(hw, E1000_RCTL);
2269 /* set or clear MFLCN.PMCF bit depending on configuration */
2270 if (fc_conf->mac_ctrl_frame_fwd != 0)
2271 rctl |= E1000_RCTL_PMCF;
2273 rctl &= ~E1000_RCTL_PMCF;
2275 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2276 E1000_WRITE_FLUSH(hw);
2281 PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
2285 #define E1000_RAH_POOLSEL_SHIFT (18)
2287 eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
2288 uint32_t index, __rte_unused uint32_t pool)
2290 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2293 e1000_rar_set(hw, mac_addr->addr_bytes, index);
2294 rah = E1000_READ_REG(hw, E1000_RAH(index));
2295 rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool));
2296 E1000_WRITE_REG(hw, E1000_RAH(index), rah);
2300 eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
2302 uint8_t addr[ETHER_ADDR_LEN];
2303 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2305 memset(addr, 0, sizeof(addr));
2307 e1000_rar_set(hw, addr, index);
2311 eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
2312 struct ether_addr *addr)
2314 eth_igb_rar_clear(dev, 0);
2316 eth_igb_rar_set(dev, (void *)addr, 0, 0);
2319 * Virtual Function operations
2322 igbvf_intr_disable(struct e1000_hw *hw)
2324 PMD_INIT_FUNC_TRACE();
2326 /* Clear the interrupt mask to stop interrupts from being generated */
2327 E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF);
2329 E1000_WRITE_FLUSH(hw);
2333 igbvf_stop_adapter(struct rte_eth_dev *dev)
2337 struct rte_eth_dev_info dev_info;
2338 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2340 memset(&dev_info, 0, sizeof(dev_info));
2341 eth_igbvf_infos_get(dev, &dev_info);
2343 /* Clear the interrupt mask to stop interrupts from being generated */
2344 igbvf_intr_disable(hw);
2346 /* Clear any pending interrupts, flush previous writes */
2347 E1000_READ_REG(hw, E1000_EICR);
2349 /* Disable the transmit unit. Each queue must be disabled. */
2350 for (i = 0; i < dev_info.max_tx_queues; i++)
2351 E1000_WRITE_REG(hw, E1000_TXDCTL(i), E1000_TXDCTL_SWFLSH);
2353 /* Disable the receive unit by stopping each queue */
2354 for (i = 0; i < dev_info.max_rx_queues; i++) {
2355 reg_val = E1000_READ_REG(hw, E1000_RXDCTL(i));
2356 reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE;
2357 E1000_WRITE_REG(hw, E1000_RXDCTL(i), reg_val);
2358 while (E1000_READ_REG(hw, E1000_RXDCTL(i)) & E1000_RXDCTL_QUEUE_ENABLE)
2362 /* flush all queue disables */
2363 E1000_WRITE_FLUSH(hw);
2367 static int eth_igbvf_link_update(struct e1000_hw *hw)
2369 struct e1000_mbx_info *mbx = &hw->mbx;
2370 struct e1000_mac_info *mac = &hw->mac;
2371 int ret_val = E1000_SUCCESS;
2373 PMD_INIT_LOG(DEBUG, "e1000_check_for_link_vf");
2376 * We only want to run this if a reset has been asserted.
2377 * In this case that could mean a link change, a device reset,
2378 * or a virtual function reset.
2381 /* If we were hit with a reset or timeout drop the link */
2382 if (!e1000_check_for_rst(hw, 0) || !mbx->timeout)
2383 mac->get_link_status = TRUE;
2385 if (!mac->get_link_status)
2388 /* if link status is down, there is no point in checking whether the PF is up */
2389 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
2392 /* if we passed all the tests above then the link is up and we no
2393 * longer need to check for link */
2394 mac->get_link_status = FALSE;
2402 igbvf_dev_configure(struct rte_eth_dev *dev)
2404 struct rte_eth_conf* conf = &dev->data->dev_conf;
2406 PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
2407 dev->data->port_id);
2410 * The VF has no ability to enable/disable HW CRC stripping,
2411 * so keep the behavior consistent with the host PF.
2413 #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
2414 if (!conf->rxmode.hw_strip_crc) {
2415 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
2416 conf->rxmode.hw_strip_crc = 1;
2419 if (conf->rxmode.hw_strip_crc) {
2420 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
2421 conf->rxmode.hw_strip_crc = 0;
2429 igbvf_dev_start(struct rte_eth_dev *dev)
2431 struct e1000_hw *hw =
2432 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2433 struct e1000_adapter *adapter =
2434 E1000_DEV_PRIVATE(dev->data->dev_private);
2437 PMD_INIT_FUNC_TRACE();
2439 hw->mac.ops.reset_hw(hw);
2440 adapter->stopped = 0;
2443 igbvf_set_vfta_all(dev,1);
2445 eth_igbvf_tx_init(dev);
2447 /* This can fail when allocating mbufs for descriptor rings */
2448 ret = eth_igbvf_rx_init(dev);
2450 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
2451 igb_dev_clear_queues(dev);
2459 igbvf_dev_stop(struct rte_eth_dev *dev)
2461 PMD_INIT_FUNC_TRACE();
2463 igbvf_stop_adapter(dev);
2466 * Clear what we set, but keep shadow_vfta so it can be
2467 * restored after the device starts.
2469 igbvf_set_vfta_all(dev,0);
2471 igb_dev_clear_queues(dev);
2475 igbvf_dev_close(struct rte_eth_dev *dev)
2477 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2478 struct e1000_adapter *adapter =
2479 E1000_DEV_PRIVATE(dev->data->dev_private);
2481 PMD_INIT_FUNC_TRACE();
2485 igbvf_dev_stop(dev);
2486 adapter->stopped = 1;
2487 igb_dev_free_queues(dev);
2490 static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on)
2492 struct e1000_mbx_info *mbx = &hw->mbx;
2495 /* After setting a VLAN, VLAN stripping will also be enabled in the igb driver */
2496 msgbuf[0] = E1000_VF_SET_VLAN;
2498 /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
2500 msgbuf[0] |= E1000_VF_SET_VLAN_ADD;
2502 return (mbx->ops.write_posted(hw, msgbuf, 2, 0));
2505 static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on)
2507 struct e1000_hw *hw =
2508 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2509 struct e1000_vfta * shadow_vfta =
2510 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2511 int i = 0, j = 0, vfta = 0, mask = 1;
2513 for (i = 0; i < IGB_VFTA_SIZE; i++){
2514 vfta = shadow_vfta->vfta[i];
2517 for (j = 0; j < 32; j++){
2520 (uint16_t)((i<<5)+j), on);
2529 igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2531 struct e1000_hw *hw =
2532 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2533 struct e1000_vfta * shadow_vfta =
2534 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2535 uint32_t vid_idx = 0;
2536 uint32_t vid_bit = 0;
2539 PMD_INIT_FUNC_TRACE();
2541 /* vind is not used in the VF driver; set it to 0 (see ixgbe_set_vfta_vf) */
2542 ret = igbvf_set_vfta(hw, vlan_id, !!on);
2544 PMD_INIT_LOG(ERR, "Unable to set VF vlan");
2547 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
2548 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
2550 /* Save what we set and restore it after device reset */
2552 shadow_vfta->vfta[vid_idx] |= vid_bit;
2554 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
2560 igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr)
2562 struct e1000_hw *hw =
2563 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2565 /* index is not used by rar_set() */
2566 hw->mac.ops.rar_set(hw, (void *)addr, 0);
2571 eth_igb_rss_reta_update(struct rte_eth_dev *dev,
2572 struct rte_eth_rss_reta_entry64 *reta_conf,
2577 uint16_t idx, shift;
2578 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2580 if (reta_size != ETH_RSS_RETA_SIZE_128) {
2581 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2582 "(%d) doesn't match the number hardware can supported "
2583 "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
2587 for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
2588 idx = i / RTE_RETA_GROUP_SIZE;
2589 shift = i % RTE_RETA_GROUP_SIZE;
2590 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2594 if (mask == IGB_4_BIT_MASK)
2597 r = E1000_READ_REG(hw, E1000_RETA(i >> 2));
2598 for (j = 0, reta = 0; j < IGB_4_BIT_WIDTH; j++) {
2599 if (mask & (0x1 << j))
2600 reta |= reta_conf[idx].reta[shift + j] <<
2603 reta |= r & (IGB_8_BIT_MASK << (CHAR_BIT * j));
2605 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta);
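/*
 * Illustrative sketch, assuming the standard rte_ethdev RSS API
 * (rte_eth_dev_rss_reta_update, RTE_RETA_GROUP_SIZE == 64): building a
 * redirection table that alternates between RX queues 0 and 1 and handing it
 * to the function above through the ethdev layer.
 *
 *	struct rte_eth_rss_reta_entry64 conf[2];
 *	int i;
 *
 *	memset(conf, 0, sizeof(conf));
 *	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i++) {
 *		conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *				1ULL << (i % RTE_RETA_GROUP_SIZE);
 *		conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *				i & 1;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, conf, ETH_RSS_RETA_SIZE_128);
 *
 * Each RETA register holds four 8-bit entries, which is why the driver walks
 * the table in IGB_4_BIT_WIDTH steps and packs four queue indexes per write.
 */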
2612 eth_igb_rss_reta_query(struct rte_eth_dev *dev,
2613 struct rte_eth_rss_reta_entry64 *reta_conf,
2618 uint16_t idx, shift;
2619 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2621 if (reta_size != ETH_RSS_RETA_SIZE_128) {
2622 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2623 "(%d) doesn't match the number hardware can supported "
2624 "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
2628 for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
2629 idx = i / RTE_RETA_GROUP_SIZE;
2630 shift = i % RTE_RETA_GROUP_SIZE;
2631 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2635 reta = E1000_READ_REG(hw, E1000_RETA(i >> 2));
2636 for (j = 0; j < IGB_4_BIT_WIDTH; j++) {
2637 if (mask & (0x1 << j))
2638 reta_conf[idx].reta[shift + j] =
2639 ((reta >> (CHAR_BIT * j)) &
2647 #define MAC_TYPE_FILTER_SUP(type) do {\
2648 if ((type) != e1000_82580 && (type) != e1000_i350 &&\
2649 (type) != e1000_82576)\
2654 eth_igb_syn_filter_set(struct rte_eth_dev *dev,
2655 struct rte_eth_syn_filter *filter,
2658 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2659 uint32_t synqf, rfctl;
2661 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
2664 synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
2667 if (synqf & E1000_SYN_FILTER_ENABLE)
2670 synqf = (uint32_t)(((filter->queue << E1000_SYN_FILTER_QUEUE_SHIFT) &
2671 E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE);
2673 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
2674 if (filter->hig_pri)
2675 rfctl |= E1000_RFCTL_SYNQFP;
2677 rfctl &= ~E1000_RFCTL_SYNQFP;
2679 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
2681 if (!(synqf & E1000_SYN_FILTER_ENABLE))
2686 E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
2687 E1000_WRITE_FLUSH(hw);
2692 eth_igb_syn_filter_get(struct rte_eth_dev *dev,
2693 struct rte_eth_syn_filter *filter)
2695 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2696 uint32_t synqf, rfctl;
2698 synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
2699 if (synqf & E1000_SYN_FILTER_ENABLE) {
2700 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
2701 filter->hig_pri = (rfctl & E1000_RFCTL_SYNQFP) ? 1 : 0;
2702 filter->queue = (uint8_t)((synqf & E1000_SYN_FILTER_QUEUE) >>
2703 E1000_SYN_FILTER_QUEUE_SHIFT);
2711 eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
2712 enum rte_filter_op filter_op,
2715 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2718 MAC_TYPE_FILTER_SUP(hw->mac.type);
2720 if (filter_op == RTE_ETH_FILTER_NOP)
2724 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
2729 switch (filter_op) {
2730 case RTE_ETH_FILTER_ADD:
2731 ret = eth_igb_syn_filter_set(dev,
2732 (struct rte_eth_syn_filter *)arg,
2735 case RTE_ETH_FILTER_DELETE:
2736 ret = eth_igb_syn_filter_set(dev,
2737 (struct rte_eth_syn_filter *)arg,
2740 case RTE_ETH_FILTER_GET:
2741 ret = eth_igb_syn_filter_get(dev,
2742 (struct rte_eth_syn_filter *)arg);
2745 PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
2753 #define MAC_TYPE_FILTER_SUP_EXT(type) do {\
2754 if ((type) != e1000_82580 && (type) != e1000_i350)\
2758 /* translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info */
2760 ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter,
2761 struct e1000_2tuple_filter_info *filter_info)
2763 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
2765 if (filter->priority > E1000_2TUPLE_MAX_PRI)
2766 return -EINVAL; /* priority is out of range. */
2767 if (filter->tcp_flags > TCP_FLAG_ALL)
2768 return -EINVAL; /* flags is invalid. */
2770 switch (filter->dst_port_mask) {
2772 filter_info->dst_port_mask = 0;
2773 filter_info->dst_port = filter->dst_port;
2776 filter_info->dst_port_mask = 1;
2779 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
2783 switch (filter->proto_mask) {
2785 filter_info->proto_mask = 0;
2786 filter_info->proto = filter->proto;
2789 filter_info->proto_mask = 1;
2792 PMD_DRV_LOG(ERR, "invalid protocol mask.");
2796 filter_info->priority = (uint8_t)filter->priority;
2797 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
2798 filter_info->tcp_flags = filter->tcp_flags;
2800 filter_info->tcp_flags = 0;
2805 static inline struct e1000_2tuple_filter *
2806 igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list,
2807 struct e1000_2tuple_filter_info *key)
2809 struct e1000_2tuple_filter *it;
2811 TAILQ_FOREACH(it, filter_list, entries) {
2812 if (memcmp(key, &it->filter_info,
2813 sizeof(struct e1000_2tuple_filter_info)) == 0) {
2821 * igb_add_2tuple_filter - add a 2tuple filter
2824 * dev: Pointer to struct rte_eth_dev.
2825 * ntuple_filter: pointer to the filter that will be added.
2828 * - On success, zero.
2829 * - On failure, a negative value.
2832 igb_add_2tuple_filter(struct rte_eth_dev *dev,
2833 struct rte_eth_ntuple_filter *ntuple_filter)
2835 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2836 struct e1000_filter_info *filter_info =
2837 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2838 struct e1000_2tuple_filter *filter;
2839 uint32_t ttqf = E1000_TTQF_DISABLE_MASK;
2840 uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP;
2843 filter = rte_zmalloc("e1000_2tuple_filter",
2844 sizeof(struct e1000_2tuple_filter), 0);
2848 ret = ntuple_filter_to_2tuple(ntuple_filter,
2849 &filter->filter_info);
2854 if (igb_2tuple_filter_lookup(&filter_info->twotuple_list,
2855 &filter->filter_info) != NULL) {
2856 PMD_DRV_LOG(ERR, "filter exists.");
2860 filter->queue = ntuple_filter->queue;
2863 * look for an unused 2tuple filter index,
2864 * and insert the filter into the list.
2866 for (i = 0; i < E1000_MAX_TTQF_FILTERS; i++) {
2867 if (!(filter_info->twotuple_mask & (1 << i))) {
2868 filter_info->twotuple_mask |= 1 << i;
2870 TAILQ_INSERT_TAIL(&filter_info->twotuple_list,
2876 if (i >= E1000_MAX_TTQF_FILTERS) {
2877 PMD_DRV_LOG(ERR, "2tuple filters are full.");
2882 imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
2883 if (filter->filter_info.dst_port_mask == 1) /* 1b means do not compare. */
2884 imir |= E1000_IMIR_PORT_BP;
2886 imir &= ~E1000_IMIR_PORT_BP;
2888 imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
2890 ttqf |= E1000_TTQF_QUEUE_ENABLE;
2891 ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT);
2892 ttqf |= (uint32_t)(filter->filter_info.proto & E1000_TTQF_PROTOCOL_MASK);
2893 if (filter->filter_info.proto_mask == 0)
2894 ttqf &= ~E1000_TTQF_MASK_ENABLE;
2896 /* tcp flags bits setting. */
2897 if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
2898 if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
2899 imir_ext |= E1000_IMIREXT_CTRL_URG;
2900 if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
2901 imir_ext |= E1000_IMIREXT_CTRL_ACK;
2902 if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
2903 imir_ext |= E1000_IMIREXT_CTRL_PSH;
2904 if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
2905 imir_ext |= E1000_IMIREXT_CTRL_RST;
2906 if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
2907 imir_ext |= E1000_IMIREXT_CTRL_SYN;
2908 if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
2909 imir_ext |= E1000_IMIREXT_CTRL_FIN;
2911 imir_ext |= E1000_IMIREXT_CTRL_BP;
2912 E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
2913 E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf);
2914 E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
2919 * igb_remove_2tuple_filter - remove a 2tuple filter
2922 * dev: Pointer to struct rte_eth_dev.
2923 * ntuple_filter: pointer to the filter that will be removed.
2926 * - On success, zero.
2927 * - On failure, a negative value.
2930 igb_remove_2tuple_filter(struct rte_eth_dev *dev,
2931 struct rte_eth_ntuple_filter *ntuple_filter)
2933 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2934 struct e1000_filter_info *filter_info =
2935 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2936 struct e1000_2tuple_filter_info filter_2tuple;
2937 struct e1000_2tuple_filter *filter;
2940 memset(&filter_2tuple, 0, sizeof(struct e1000_2tuple_filter_info));
2941 ret = ntuple_filter_to_2tuple(ntuple_filter,
2946 filter = igb_2tuple_filter_lookup(&filter_info->twotuple_list,
2948 if (filter == NULL) {
2949 PMD_DRV_LOG(ERR, "filter doesn't exist.");
2953 filter_info->twotuple_mask &= ~(1 << filter->index);
2954 TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries);
2957 E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK);
2958 E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
2959 E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
2963 static inline struct e1000_flex_filter *
2964 eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list,
2965 struct e1000_flex_filter_info *key)
2967 struct e1000_flex_filter *it;
2969 TAILQ_FOREACH(it, filter_list, entries) {
2970 if (memcmp(key, &it->filter_info,
2971 sizeof(struct e1000_flex_filter_info)) == 0)
2979 eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
2980 struct rte_eth_flex_filter *filter,
2983 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2984 struct e1000_filter_info *filter_info =
2985 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2986 struct e1000_flex_filter *flex_filter, *it;
2987 uint32_t wufc, queueing, mask;
2989 uint8_t shift, i, j = 0;
2991 flex_filter = rte_zmalloc("e1000_flex_filter",
2992 sizeof(struct e1000_flex_filter), 0);
2993 if (flex_filter == NULL)
2996 flex_filter->filter_info.len = filter->len;
2997 flex_filter->filter_info.priority = filter->priority;
2998 memcpy(flex_filter->filter_info.dwords, filter->bytes, filter->len);
2999 for (i = 0; i < RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT; i++) {
3001 /* reverse bits in flex filter's mask */
3002 for (shift = 0; shift < CHAR_BIT; shift++) {
3003 if (filter->mask[i] & (0x01 << shift))
3004 mask |= (0x80 >> shift);
3006 flex_filter->filter_info.mask[i] = mask;
3009 wufc = E1000_READ_REG(hw, E1000_WUFC);
3010 if (flex_filter->index < E1000_MAX_FHFT)
3011 reg_off = E1000_FHFT(flex_filter->index);
3013 reg_off = E1000_FHFT_EXT(flex_filter->index - E1000_MAX_FHFT);
3016 if (eth_igb_flex_filter_lookup(&filter_info->flex_list,
3017 &flex_filter->filter_info) != NULL) {
3018 PMD_DRV_LOG(ERR, "filter exists.");
3019 rte_free(flex_filter);
3022 flex_filter->queue = filter->queue;
3024 * look for an unused flex filter index
3025 * and insert the filter into the list.
3027 for (i = 0; i < E1000_MAX_FLEX_FILTERS; i++) {
3028 if (!(filter_info->flex_mask & (1 << i))) {
3029 filter_info->flex_mask |= 1 << i;
3030 flex_filter->index = i;
3031 TAILQ_INSERT_TAIL(&filter_info->flex_list,
3037 if (i >= E1000_MAX_FLEX_FILTERS) {
3038 PMD_DRV_LOG(ERR, "flex filters are full.");
3039 rte_free(flex_filter);
3043 E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
3044 (E1000_WUFC_FLX0 << flex_filter->index));
3045 queueing = filter->len |
3046 (filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
3047 (filter->priority << E1000_FHFT_QUEUEING_PRIO_SHIFT);
3048 E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET,
3050 for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) {
3051 E1000_WRITE_REG(hw, reg_off,
3052 flex_filter->filter_info.dwords[j]);
3053 reg_off += sizeof(uint32_t);
3054 E1000_WRITE_REG(hw, reg_off,
3055 flex_filter->filter_info.dwords[++j]);
3056 reg_off += sizeof(uint32_t);
3057 E1000_WRITE_REG(hw, reg_off,
3058 (uint32_t)flex_filter->filter_info.mask[i]);
3059 reg_off += sizeof(uint32_t) * 2;
3063 it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
3064 &flex_filter->filter_info);
3066 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3067 rte_free(flex_filter);
3071 for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
3072 E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);
3073 E1000_WRITE_REG(hw, E1000_WUFC, wufc &
3074 (~(E1000_WUFC_FLX0 << it->index)));
3076 filter_info->flex_mask &= ~(1 << it->index);
3077 TAILQ_REMOVE(&filter_info->flex_list, it, entries);
3079 rte_free(flex_filter);
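/*
 * Illustrative note (assumption, not from the original source): the
 * bit-reversal loop near the top of the function mirrors each mask byte,
 * presumably because the FHFT mask field is consumed most-significant-bit
 * first.  For example:
 *
 *	filter->mask[i] == 0x01  ->  filter_info.mask[i] == 0x80
 *	filter->mask[i] == 0x03  ->  filter_info.mask[i] == 0xC0
 *
 * i.e. a mask selecting the first byte(s) of an 8-byte chunk of the flex
 * pattern ends up in the high-order bits of the value written to hardware.
 */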
3086 eth_igb_get_flex_filter(struct rte_eth_dev *dev,
3087 struct rte_eth_flex_filter *filter)
3089 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3090 struct e1000_filter_info *filter_info =
3091 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3092 struct e1000_flex_filter flex_filter, *it;
3093 uint32_t wufc, queueing, wufc_en = 0;
3095 memset(&flex_filter, 0, sizeof(struct e1000_flex_filter));
3096 flex_filter.filter_info.len = filter->len;
3097 flex_filter.filter_info.priority = filter->priority;
3098 memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len);
3099 memcpy(flex_filter.filter_info.mask, filter->mask,
3100 RTE_ALIGN(filter->len, sizeof(char)) / sizeof(char));
3102 it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
3103 &flex_filter.filter_info);
3105 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3109 wufc = E1000_READ_REG(hw, E1000_WUFC);
3110 wufc_en = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << it->index);
3112 if ((wufc & wufc_en) == wufc_en) {
3113 uint32_t reg_off = 0;
3114 if (it->index < E1000_MAX_FHFT)
3115 reg_off = E1000_FHFT(it->index);
3117 reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);
3119 queueing = E1000_READ_REG(hw,
3120 reg_off + E1000_FHFT_QUEUEING_OFFSET);
3121 filter->len = queueing & E1000_FHFT_QUEUEING_LEN;
3122 filter->priority = (queueing & E1000_FHFT_QUEUEING_PRIO) >>
3123 E1000_FHFT_QUEUEING_PRIO_SHIFT;
3124 filter->queue = (queueing & E1000_FHFT_QUEUEING_QUEUE) >>
3125 E1000_FHFT_QUEUEING_QUEUE_SHIFT;
3132 eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
3133 enum rte_filter_op filter_op,
3136 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3137 struct rte_eth_flex_filter *filter;
3140 MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
3142 if (filter_op == RTE_ETH_FILTER_NOP)
3146 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
3151 filter = (struct rte_eth_flex_filter *)arg;
3152 if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN
3153 || filter->len % sizeof(uint64_t) != 0) {
3154 PMD_DRV_LOG(ERR, "filter's length is out of range");
3157 if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
3158 PMD_DRV_LOG(ERR, "filter's priority is out of range");
3162 switch (filter_op) {
3163 case RTE_ETH_FILTER_ADD:
3164 ret = eth_igb_add_del_flex_filter(dev, filter, TRUE);
3166 case RTE_ETH_FILTER_DELETE:
3167 ret = eth_igb_add_del_flex_filter(dev, filter, FALSE);
3169 case RTE_ETH_FILTER_GET:
3170 ret = eth_igb_get_flex_filter(dev, filter);
3173 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
3181 /* translate elements in struct rte_eth_ntuple_filter to struct e1000_5tuple_filter_info */
3183 ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter,
3184 struct e1000_5tuple_filter_info *filter_info)
3186 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576)
3188 if (filter->priority > E1000_2TUPLE_MAX_PRI)
3189 return -EINVAL; /* priority is out of range. */
3190 if (filter->tcp_flags > TCP_FLAG_ALL)
3191 return -EINVAL; /* flags is invalid. */
3193 switch (filter->dst_ip_mask) {
3195 filter_info->dst_ip_mask = 0;
3196 filter_info->dst_ip = filter->dst_ip;
3199 filter_info->dst_ip_mask = 1;
3202 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
3206 switch (filter->src_ip_mask) {
3208 filter_info->src_ip_mask = 0;
3209 filter_info->src_ip = filter->src_ip;
3212 filter_info->src_ip_mask = 1;
3215 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
3219 switch (filter->dst_port_mask) {
3221 filter_info->dst_port_mask = 0;
3222 filter_info->dst_port = filter->dst_port;
3225 filter_info->dst_port_mask = 1;
3228 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
3232 switch (filter->src_port_mask) {
3234 filter_info->src_port_mask = 0;
3235 filter_info->src_port = filter->src_port;
3238 filter_info->src_port_mask = 1;
3241 PMD_DRV_LOG(ERR, "invalid src_port mask.");
3245 switch (filter->proto_mask) {
3247 filter_info->proto_mask = 0;
3248 filter_info->proto = filter->proto;
3251 filter_info->proto_mask = 1;
3254 PMD_DRV_LOG(ERR, "invalid protocol mask.");
3258 filter_info->priority = (uint8_t)filter->priority;
3259 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
3260 filter_info->tcp_flags = filter->tcp_flags;
3262 filter_info->tcp_flags = 0;
3267 static inline struct e1000_5tuple_filter *
3268 igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list,
3269 struct e1000_5tuple_filter_info *key)
3271 struct e1000_5tuple_filter *it;
3273 TAILQ_FOREACH(it, filter_list, entries) {
3274 if (memcmp(key, &it->filter_info,
3275 sizeof(struct e1000_5tuple_filter_info)) == 0) {
3283 * igb_add_5tuple_filter_82576 - add a 5tuple filter
3286 * dev: Pointer to struct rte_eth_dev.
3287 * ntuple_filter: pointer to the filter that will be added.
3290 * - On success, zero.
3291 * - On failure, a negative value.
3294 igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
3295 struct rte_eth_ntuple_filter *ntuple_filter)
3297 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3298 struct e1000_filter_info *filter_info =
3299 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3300 struct e1000_5tuple_filter *filter;
3301 uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK;
3302 uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP;
3306 filter = rte_zmalloc("e1000_5tuple_filter",
3307 sizeof(struct e1000_5tuple_filter), 0);
3311 ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
3312 &filter->filter_info);
3318 if (igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
3319 &filter->filter_info) != NULL) {
3320 PMD_DRV_LOG(ERR, "filter exists.");
3324 filter->queue = ntuple_filter->queue;
3327 * look for an unused 5tuple filter index,
3328 * and insert the filter into the list.
3330 for (i = 0; i < E1000_MAX_FTQF_FILTERS; i++) {
3331 if (!(filter_info->fivetuple_mask & (1 << i))) {
3332 filter_info->fivetuple_mask |= 1 << i;
3334 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
3340 if (i >= E1000_MAX_FTQF_FILTERS) {
3341 PMD_DRV_LOG(ERR, "5tuple filters are full.");
3346 ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK;
3347 if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. */
3348 ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP;
3349 if (filter->filter_info.dst_ip_mask == 0)
3350 ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP;
3351 if (filter->filter_info.src_port_mask == 0)
3352 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
3353 if (filter->filter_info.proto_mask == 0)
3354 ftqf &= ~E1000_FTQF_MASK_PROTO_BP;
3355 ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) &
3356 E1000_FTQF_QUEUE_MASK;
3357 ftqf |= E1000_FTQF_QUEUE_ENABLE;
3358 E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf);
3359 E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip);
3360 E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip);
3362 spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT;
3363 E1000_WRITE_REG(hw, E1000_SPQF(i), spqf);
3365 imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
3366 if (filter->filter_info.dst_port_mask == 1) /* 1b means do not compare. */
3367 imir |= E1000_IMIR_PORT_BP;
3369 imir &= ~E1000_IMIR_PORT_BP;
3370 imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
3372 /* tcp flags bits setting. */
3373 if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
3374 if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
3375 imir_ext |= E1000_IMIREXT_CTRL_URG;
3376 if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
3377 imir_ext |= E1000_IMIREXT_CTRL_ACK;
3378 if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
3379 imir_ext |= E1000_IMIREXT_CTRL_PSH;
3380 if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
3381 imir_ext |= E1000_IMIREXT_CTRL_RST;
3382 if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
3383 imir_ext |= E1000_IMIREXT_CTRL_SYN;
3384 if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
3385 imir_ext |= E1000_IMIREXT_CTRL_FIN;
3387 imir_ext |= E1000_IMIREXT_CTRL_BP;
3388 E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
3389 E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
3394 * igb_remove_5tuple_filter_82576 - remove a 5tuple filter
3397 * dev: Pointer to struct rte_eth_dev.
3398 * ntuple_filter: pointer to the filter that will be removed.
3401 * - On success, zero.
3402 * - On failure, a negative value.
3405 igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
3406 struct rte_eth_ntuple_filter *ntuple_filter)
3408 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3409 struct e1000_filter_info *filter_info =
3410 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3411 struct e1000_5tuple_filter_info filter_5tuple;
3412 struct e1000_5tuple_filter *filter;
3415 memset(&filter_5tuple, 0, sizeof(struct e1000_5tuple_filter_info));
3416 ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
3421 filter = igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
3423 if (filter == NULL) {
3424 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3428 filter_info->fivetuple_mask &= ~(1 << filter->index);
3429 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
3432 E1000_WRITE_REG(hw, E1000_FTQF(filter->index),
3433 E1000_FTQF_VF_BP | E1000_FTQF_MASK);
3434 E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0);
3435 E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0);
3436 E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0);
3437 E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
3438 E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
3443 eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3446 struct e1000_hw *hw;
3447 struct rte_eth_dev_info dev_info;
3448 uint32_t frame_size = mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN +
3451 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3453 #ifdef RTE_LIBRTE_82571_SUPPORT
3454 /* XXX: not bigger than max_rx_pktlen */
3455 if (hw->mac.type == e1000_82571)
3458 eth_igb_infos_get(dev, &dev_info);
3460 /* check that mtu is within the allowed range */
3461 if ((mtu < ETHER_MIN_MTU) ||
3462 (frame_size > dev_info.max_rx_pktlen))
3465 /* refuse an MTU that requires scattered-packet support when this
3466 * feature has not been enabled beforehand. */
3467 if (!dev->data->scattered_rx &&
3468 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
3471 rctl = E1000_READ_REG(hw, E1000_RCTL);
3473 /* switch to jumbo mode if needed */
3474 if (frame_size > ETHER_MAX_LEN) {
3475 dev->data->dev_conf.rxmode.jumbo_frame = 1;
3476 rctl |= E1000_RCTL_LPE;
3478 dev->data->dev_conf.rxmode.jumbo_frame = 0;
3479 rctl &= ~E1000_RCTL_LPE;
3481 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
3483 /* update max frame size */
3484 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
3486 E1000_WRITE_REG(hw, E1000_RLPML,
3487 dev->data->dev_conf.rxmode.max_rx_pkt_len);
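/*
 * Illustrative sketch, assuming the standard rte_ethdev API
 * (rte_eth_dev_set_mtu): an application enabling jumbo frames simply sets a
 * larger MTU, and the function above derives the on-wire frame size, flips
 * E1000_RCTL_LPE and updates max_rx_pkt_len accordingly.
 *
 *	if (rte_eth_dev_set_mtu(port_id, 9000) != 0)
 *		rte_exit(EXIT_FAILURE, "cannot set MTU\n");
 */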
3493 * igb_add_del_ntuple_filter - add or delete a ntuple filter
3496 * dev: Pointer to struct rte_eth_dev.
3497 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
3498 * add: if true, add filter, if false, remove filter
3501 * - On success, zero.
3502 * - On failure, a negative value.
3505 igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
3506 struct rte_eth_ntuple_filter *ntuple_filter,
3509 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3512 switch (ntuple_filter->flags) {
3513 case RTE_5TUPLE_FLAGS:
3514 case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
3515 if (hw->mac.type != e1000_82576)
3518 ret = igb_add_5tuple_filter_82576(dev,
3521 ret = igb_remove_5tuple_filter_82576(dev,
3524 case RTE_2TUPLE_FLAGS:
3525 case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
3526 if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
3529 ret = igb_add_2tuple_filter(dev, ntuple_filter);
3531 ret = igb_remove_2tuple_filter(dev, ntuple_filter);
3542 * igb_get_ntuple_filter - get a ntuple filter
3545 * dev: Pointer to struct rte_eth_dev.
3546 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
3549 * - On success, zero.
3550 * - On failure, a negative value.
3553 igb_get_ntuple_filter(struct rte_eth_dev *dev,
3554 struct rte_eth_ntuple_filter *ntuple_filter)
3556 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3557 struct e1000_filter_info *filter_info =
3558 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3559 struct e1000_5tuple_filter_info filter_5tuple;
3560 struct e1000_2tuple_filter_info filter_2tuple;
3561 struct e1000_5tuple_filter *p_5tuple_filter;
3562 struct e1000_2tuple_filter *p_2tuple_filter;
3565 switch (ntuple_filter->flags) {
3566 case RTE_5TUPLE_FLAGS:
3567 case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
3568 if (hw->mac.type != e1000_82576)
3570 memset(&filter_5tuple,
3572 sizeof(struct e1000_5tuple_filter_info));
3573 ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
3577 p_5tuple_filter = igb_5tuple_filter_lookup_82576(
3578 &filter_info->fivetuple_list,
3580 if (p_5tuple_filter == NULL) {
3581 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3584 ntuple_filter->queue = p_5tuple_filter->queue;
3586 case RTE_2TUPLE_FLAGS:
3587 case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
3588 if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
3590 memset(&filter_2tuple,
3592 sizeof(struct e1000_2tuple_filter_info));
3593 ret = ntuple_filter_to_2tuple(ntuple_filter, &filter_2tuple);
3596 p_2tuple_filter = igb_2tuple_filter_lookup(
3597 &filter_info->twotuple_list,
3599 if (p_2tuple_filter == NULL) {
3600 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3603 ntuple_filter->queue = p_2tuple_filter->queue;
3614 * igb_ntuple_filter_handle - Handle operations for ntuple filter.
3615 * @dev: pointer to rte_eth_dev structure
3616 * @filter_op: operation to be taken.
3617 * @arg: a pointer to specific structure corresponding to the filter_op
3620 igb_ntuple_filter_handle(struct rte_eth_dev *dev,
3621 enum rte_filter_op filter_op,
3624 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3627 MAC_TYPE_FILTER_SUP(hw->mac.type);
3629 if (filter_op == RTE_ETH_FILTER_NOP)
3633 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
3638 switch (filter_op) {
3639 case RTE_ETH_FILTER_ADD:
3640 ret = igb_add_del_ntuple_filter(dev,
3641 (struct rte_eth_ntuple_filter *)arg,
3644 case RTE_ETH_FILTER_DELETE:
3645 ret = igb_add_del_ntuple_filter(dev,
3646 (struct rte_eth_ntuple_filter *)arg,
3649 case RTE_ETH_FILTER_GET:
3650 ret = igb_get_ntuple_filter(dev,
3651 (struct rte_eth_ntuple_filter *)arg);
3654 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
3662 igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info,
3667 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
3668 if (filter_info->ethertype_filters[i] == ethertype &&
3669 (filter_info->ethertype_mask & (1 << i)))
3676 igb_ethertype_filter_insert(struct e1000_filter_info *filter_info,
3681 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
3682 if (!(filter_info->ethertype_mask & (1 << i))) {
3683 filter_info->ethertype_mask |= 1 << i;
3684 filter_info->ethertype_filters[i] = ethertype;
3692 igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
3695 if (idx >= E1000_MAX_ETQF_FILTERS)
3697 filter_info->ethertype_mask &= ~(1 << idx);
3698 filter_info->ethertype_filters[idx] = 0;
3704 igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
3705 struct rte_eth_ethertype_filter *filter,
3708 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3709 struct e1000_filter_info *filter_info =
3710 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3714 if (filter->ether_type == ETHER_TYPE_IPv4 ||
3715 filter->ether_type == ETHER_TYPE_IPv6) {
3716 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
3717 " ethertype filter.", filter->ether_type);
3721 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
3722 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
3725 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
3726 PMD_DRV_LOG(ERR, "drop option is unsupported.");
3730 ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
3731 if (ret >= 0 && add) {
3732 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
3733 filter->ether_type);
3736 if (ret < 0 && !add) {
3737 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
3738 filter->ether_type);
3743 ret = igb_ethertype_filter_insert(filter_info,
3744 filter->ether_type);
3746 PMD_DRV_LOG(ERR, "ethertype filters are full.");
3750 etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
3751 etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE);
3752 etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT;
3754 ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret);
3758 E1000_WRITE_REG(hw, E1000_ETQF(ret), etqf);
3759 E1000_WRITE_FLUSH(hw);
3765 igb_get_ethertype_filter(struct rte_eth_dev *dev,
3766 struct rte_eth_ethertype_filter *filter)
3768 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3769 struct e1000_filter_info *filter_info =
3770 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3774 ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
3776 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
3777 filter->ether_type);
3781 etqf = E1000_READ_REG(hw, E1000_ETQF(ret));
3782 if (etqf & E1000_ETQF_FILTER_ENABLE) {
3783 filter->ether_type = etqf & E1000_ETQF_ETHERTYPE;
3785 filter->queue = (etqf & E1000_ETQF_QUEUE) >>
3786 E1000_ETQF_QUEUE_SHIFT;
3794 * igb_ethertype_filter_handle - Handle operations for ethertype filter.
3795 * @dev: pointer to rte_eth_dev structure
3796 * @filter_op: operation to be taken.
3797 * @arg: a pointer to specific structure corresponding to the filter_op
3800 igb_ethertype_filter_handle(struct rte_eth_dev *dev,
3801 enum rte_filter_op filter_op,
3804 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3807 MAC_TYPE_FILTER_SUP(hw->mac.type);
3809 if (filter_op == RTE_ETH_FILTER_NOP)
3813 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
3818 switch (filter_op) {
3819 case RTE_ETH_FILTER_ADD:
3820 ret = igb_add_del_ethertype_filter(dev,
3821 (struct rte_eth_ethertype_filter *)arg,
3824 case RTE_ETH_FILTER_DELETE:
3825 ret = igb_add_del_ethertype_filter(dev,
3826 (struct rte_eth_ethertype_filter *)arg,
3829 case RTE_ETH_FILTER_GET:
3830 ret = igb_get_ethertype_filter(dev,
3831 (struct rte_eth_ethertype_filter *)arg);
3834 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
3842 eth_igb_filter_ctrl(struct rte_eth_dev *dev,
3843 enum rte_filter_type filter_type,
3844 enum rte_filter_op filter_op,
3849 switch (filter_type) {
3850 case RTE_ETH_FILTER_NTUPLE:
3851 ret = igb_ntuple_filter_handle(dev, filter_op, arg);
3853 case RTE_ETH_FILTER_ETHERTYPE:
3854 ret = igb_ethertype_filter_handle(dev, filter_op, arg);
3856 case RTE_ETH_FILTER_SYN:
3857 ret = eth_igb_syn_filter_handle(dev, filter_op, arg);
3859 case RTE_ETH_FILTER_FLEXIBLE:
3860 ret = eth_igb_flex_filter_handle(dev, filter_op, arg);
3863 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
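/*
 * Illustrative sketch, assuming the standard filter API of this release
 * (rte_eth_dev_filter_ctrl): adding a 5-tuple filter (82576 only, per the
 * dispatch above) that steers TCP traffic to destination port 80 into RX
 * queue 1.  Mask semantics follow the translate helpers above: an all-ones
 * field mask means "compare this field", zero means "ignore it".  Byte-order
 * handling of the port value is not shown here.
 *
 *	struct rte_eth_ntuple_filter ntuple;
 *
 *	memset(&ntuple, 0, sizeof(ntuple));
 *	ntuple.flags = RTE_5TUPLE_FLAGS;
 *	ntuple.dst_port = 80;
 *	ntuple.dst_port_mask = UINT16_MAX;
 *	ntuple.proto = IPPROTO_TCP;
 *	ntuple.proto_mask = UINT8_MAX;
 *	ntuple.priority = 1;
 *	ntuple.queue = 1;
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
 *			RTE_ETH_FILTER_ADD, &ntuple);
 */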
3872 eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
3873 struct ether_addr *mc_addr_set,
3874 uint32_t nb_mc_addr)
3876 struct e1000_hw *hw;
3878 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3879 e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
3884 igb_timesync_enable(struct rte_eth_dev *dev)
3886 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3890 /* Enable the system time since it isn't on by default. */
3891 tsauxc = E1000_READ_REG(hw, E1000_TSAUXC);
3892 tsauxc &= ~E1000_TSAUXC_DISABLE_SYSTIME;
3893 E1000_WRITE_REG(hw, E1000_TSAUXC, tsauxc);
3895 /* Start incrementing the register used to timestamp PTP packets. */
3896 E1000_WRITE_REG(hw, E1000_TIMINCA, E1000_TIMINCA_INIT);
3898 /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
3899 E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588),
3901 E1000_ETQF_FILTER_ENABLE |
3904 /* Enable timestamping of received PTP packets. */
3905 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
3906 tsync_ctl |= E1000_TSYNCRXCTL_ENABLED;
3907 E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);
3909 /* Enable timestamping of transmitted PTP packets. */
3910 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
3911 tsync_ctl |= E1000_TSYNCTXCTL_ENABLED;
3912 E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);
3918 igb_timesync_disable(struct rte_eth_dev *dev)
3920 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3923 /* Disable timestamping of transmitted PTP packets. */
3924 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
3925 tsync_ctl &= ~E1000_TSYNCTXCTL_ENABLED;
3926 E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);
3928 /* Disable timestamping of received PTP packets. */
3929 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
3930 tsync_ctl &= ~E1000_TSYNCRXCTL_ENABLED;
3931 E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);
3933 /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
3934 E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), 0);
3936 /* Stop incrementing the System Time registers. */
3937 E1000_WRITE_REG(hw, E1000_TIMINCA, 0);
3943 igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
3944 struct timespec *timestamp,
3945 uint32_t flags __rte_unused)
3947 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3948 uint32_t tsync_rxctl;
3952 tsync_rxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
3953 if ((tsync_rxctl & E1000_TSYNCRXCTL_VALID) == 0)
3956 rx_stmpl = E1000_READ_REG(hw, E1000_RXSTMPL);
3957 rx_stmph = E1000_READ_REG(hw, E1000_RXSTMPH);
3959 timestamp->tv_sec = (uint64_t)(((uint64_t)rx_stmph << 32) | rx_stmpl);
3960 timestamp->tv_nsec = 0;
3966 igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
3967 struct timespec *timestamp)
3969 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3970 uint32_t tsync_txctl;
3974 tsync_txctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
3975 if ((tsync_txctl & E1000_TSYNCTXCTL_VALID) == 0)
3978 tx_stmpl = E1000_READ_REG(hw, E1000_TXSTMPL);
3979 tx_stmph = E1000_READ_REG(hw, E1000_TXSTMPH);
3981 timestamp->tv_sec = (uint64_t)(((uint64_t)tx_stmph << 32) | tx_stmpl);
3982 timestamp->tv_nsec = 0;
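/*
 * Illustrative sketch, assuming the standard rte_ethdev timesync API
 * (rte_eth_timesync_enable / rte_eth_timesync_read_rx_timestamp /
 * rte_eth_timesync_read_tx_timestamp): a typical PTP flow on top of the
 * driver callbacks above.
 *
 *	struct timespec ts;
 *
 *	rte_eth_timesync_enable(port_id);
 *	... receive or transmit a PTP frame ...
 *	if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
 *		printf("rx stamp %ld\n", (long)ts.tv_sec);
 *	if (rte_eth_timesync_read_tx_timestamp(port_id, &ts) == 0)
 *		printf("tx stamp %ld\n", (long)ts.tv_sec);
 *
 * Note that this driver returns the raw SYSTIM counter in tv_sec and leaves
 * tv_nsec at zero, as seen in the two read functions above.
 */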
3988 eth_igb_get_reg_length(struct rte_eth_dev *dev __rte_unused)
3992 const struct reg_info *reg_group;
3994 while ((reg_group = igb_regs[g_ind++]))
3995 count += igb_reg_group_count(reg_group);
4001 igbvf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
4005 const struct reg_info *reg_group;
4007 while ((reg_group = igbvf_regs[g_ind++]))
4008 count += igb_reg_group_count(reg_group);
4014 eth_igb_get_regs(struct rte_eth_dev *dev,
4015 struct rte_dev_reg_info *regs)
4017 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4018 uint32_t *data = regs->data;
4021 const struct reg_info *reg_group;
4023 /* Support only full register dump */
4024 if ((regs->length == 0) ||
4025 (regs->length == (uint32_t)eth_igb_get_reg_length(dev))) {
4026 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
4028 while ((reg_group = igb_regs[g_ind++]))
4029 count += igb_read_regs_group(dev, &data[count],
4038 igbvf_get_regs(struct rte_eth_dev *dev,
4039 struct rte_dev_reg_info *regs)
4041 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4042 uint32_t *data = regs->data;
4045 const struct reg_info *reg_group;
4047 /* Support only full register dump */
4048 if ((regs->length == 0) ||
4049 (regs->length == (uint32_t)igbvf_get_reg_length(dev))) {
4050 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
4052 while ((reg_group = igbvf_regs[g_ind++]))
4053 count += igb_read_regs_group(dev, &data[count],
4062 eth_igb_get_eeprom_length(struct rte_eth_dev *dev)
4064 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4066 /* The returned unit is a byte count */
4067 return hw->nvm.word_size * 2;
4071 eth_igb_get_eeprom(struct rte_eth_dev *dev,
4072 struct rte_dev_eeprom_info *in_eeprom)
4074 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4075 struct e1000_nvm_info *nvm = &hw->nvm;
4076 uint16_t *data = in_eeprom->data;
4079 first = in_eeprom->offset >> 1;
4080 length = in_eeprom->length >> 1;
4081 if ((first >= hw->nvm.word_size) ||
4082 ((first + length) >= hw->nvm.word_size))
4085 in_eeprom->magic = hw->vendor_id |
4086 ((uint32_t)hw->device_id << 16);
4088 if ((nvm->ops.read) == NULL)
4091 return nvm->ops.read(hw, first, length, data);
4095 eth_igb_set_eeprom(struct rte_eth_dev *dev,
4096 struct rte_dev_eeprom_info *in_eeprom)
4098 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4099 struct e1000_nvm_info *nvm = &hw->nvm;
4100 uint16_t *data = in_eeprom->data;
4103 first = in_eeprom->offset >> 1;
4104 length = in_eeprom->length >> 1;
4105 if ((first >= hw->nvm.word_size) ||
4106 ((first + length) >= hw->nvm.word_size))
4109 in_eeprom->magic = (uint32_t)hw->vendor_id |
4110 ((uint32_t)hw->device_id << 16);
4112 if ((nvm->ops.write) == NULL)
4114 return nvm->ops.write(hw, first, length, data);
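/*
 * Illustrative sketch, assuming the standard rte_ethdev EEPROM helpers
 * (rte_eth_dev_get_eeprom_length / rte_eth_dev_get_eeprom): dumping the whole
 * NVM through the access routines above.  Offset and length are byte counts;
 * the driver converts them to 16-bit NVM words internally.
 *
 *	struct rte_dev_eeprom_info info;
 *	int len = rte_eth_dev_get_eeprom_length(port_id);
 *	uint8_t *buf = malloc(len);
 *
 *	memset(&info, 0, sizeof(info));
 *	info.data = buf;
 *	info.offset = 0;
 *	info.length = len;
 *	rte_eth_dev_get_eeprom(port_id, &info);
 */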
4117 static struct rte_driver pmd_igb_drv = {
4119 .init = rte_igb_pmd_init,
4122 static struct rte_driver pmd_igbvf_drv = {
4124 .init = rte_igbvf_pmd_init,
4128 eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
4130 struct e1000_hw *hw =
4131 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4132 uint32_t mask = 1 << queue_id;
4134 E1000_WRITE_REG(hw, E1000_EIMC, mask);
4135 E1000_WRITE_FLUSH(hw);
4141 eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
4143 struct e1000_hw *hw =
4144 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4145 uint32_t mask = 1 << queue_id;
4148 regval = E1000_READ_REG(hw, E1000_EIMS);
4149 E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
4150 E1000_WRITE_FLUSH(hw);
4152 rte_intr_enable(&dev->pci_dev->intr_handle);
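/*
 * Illustrative sketch, assuming the standard rte_ethdev RX interrupt API
 * (rte_eth_dev_rx_intr_enable / rte_eth_dev_rx_intr_disable): a polling loop
 * that arms the per-queue interrupt before sleeping and disarms it again once
 * packets arrive, ending up in the two helpers above.
 *
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	... block on the event fd associated with the queue vector ...
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *	... poll rte_eth_rx_burst() until the queue is drained ...
 */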
4158 eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
4159 uint8_t index, uint8_t offset)
4161 uint32_t val = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
4164 val &= ~((uint32_t)0xFF << offset);
4166 /* write vector and valid bit */
4167 val |= (msix_vector | E1000_IVAR_VALID) << offset;
4169 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, val);
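/*
 * Worked example (derived from the code above; the exact register layout per
 * the e1000 base code is an assumption): eth_igb_write_ivar() places an 8-bit
 * vector field at a byte offset inside one IVAR0 array entry.  For
 * msix_vector == 2, index == 1 and offset == 16:
 *
 *	val &= ~(0xFF << 16);			clear bits 23:16
 *	val |= (2 | E1000_IVAR_VALID) << 16;	write vector 2 plus valid bit
 *
 * On 82576 the caller below maps RX queue 9 to index (9 & 0x7) == 1 and
 * offset ((9 & 0x8) << 1) == 16, i.e. exactly this byte.
 */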
4173 eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
4174 uint8_t queue, uint8_t msix_vector)
4178 if (hw->mac.type == e1000_82575) {
4180 tmp = E1000_EICR_RX_QUEUE0 << queue;
4181 else if (direction == 1)
4182 tmp = E1000_EICR_TX_QUEUE0 << queue;
4183 E1000_WRITE_REG(hw, E1000_MSIXBM(msix_vector), tmp);
4184 } else if (hw->mac.type == e1000_82576) {
4185 if ((direction == 0) || (direction == 1))
4186 eth_igb_write_ivar(hw, msix_vector, queue & 0x7,
4187 ((queue & 0x8) << 1) +
4189 } else if ((hw->mac.type == e1000_82580) ||
4190 (hw->mac.type == e1000_i350) ||
4191 (hw->mac.type == e1000_i354) ||
4192 (hw->mac.type == e1000_i210) ||
4193 (hw->mac.type == e1000_i211)) {
4194 if ((direction == 0) || (direction == 1))
4195 eth_igb_write_ivar(hw, msix_vector,
4197 ((queue & 0x1) << 4) +
4202 /* Sets up the hardware to generate MSI-X interrupts properly.
4204 * @dev: board private structure
4207 eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
4210 uint32_t tmpval, regval, intr_mask;
4211 struct e1000_hw *hw =
4212 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4214 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
4216 /* don't configure the MSI-X register if no mapping has been done
4217 * between interrupt vectors and event fds
4219 if (!rte_intr_dp_is_en(intr_handle))
4222 /* set interrupt vector for other causes */
4223 if (hw->mac.type == e1000_82575) {
4224 tmpval = E1000_READ_REG(hw, E1000_CTRL_EXT);
4225 /* enable MSI-X PBA support */
4226 tmpval |= E1000_CTRL_EXT_PBA_CLR;
4228 /* Auto-Mask interrupts upon ICR read */
4229 tmpval |= E1000_CTRL_EXT_EIAME;
4230 tmpval |= E1000_CTRL_EXT_IRCA;
4232 E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmpval);
4234 /* enable msix_other interrupt */
4235 E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), 0, E1000_EIMS_OTHER);
4236 regval = E1000_READ_REG(hw, E1000_EIAC);
4237 E1000_WRITE_REG(hw, E1000_EIAC, regval | E1000_EIMS_OTHER);
4238 regval = E1000_READ_REG(hw, E1000_EIAM);
4239 E1000_WRITE_REG(hw, E1000_EIMS, regval | E1000_EIMS_OTHER);
4240 } else if ((hw->mac.type == e1000_82576) ||
4241 (hw->mac.type == e1000_82580) ||
4242 (hw->mac.type == e1000_i350) ||
4243 (hw->mac.type == e1000_i354) ||
4244 (hw->mac.type == e1000_i210) ||
4245 (hw->mac.type == e1000_i211)) {
4246 /* turn on MSI-X capability first */
4247 E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE |
4248 E1000_GPIE_PBA | E1000_GPIE_EIAME |
4251 intr_mask = (1 << intr_handle->max_intr) - 1;
4252 regval = E1000_READ_REG(hw, E1000_EIAC);
4253 E1000_WRITE_REG(hw, E1000_EIAC, regval | intr_mask);
4255 /* enable msix_other interrupt */
4256 regval = E1000_READ_REG(hw, E1000_EIMS);
4257 E1000_WRITE_REG(hw, E1000_EIMS, regval | intr_mask);
4258 tmpval = (dev->data->nb_rx_queues | E1000_IVAR_VALID) << 8;
4259 E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmpval);
4262 /* use EIAM to auto-mask when an MSI-X interrupt is asserted;
4263 * this saves a register write for every interrupt */
4265 intr_mask = (1 << intr_handle->nb_efd) - 1;
4266 regval = E1000_READ_REG(hw, E1000_EIAM);
4267 E1000_WRITE_REG(hw, E1000_EIAM, regval | intr_mask);
4269 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
4270 eth_igb_assign_msix_vector(hw, 0, queue_id, vec);
4271 intr_handle->intr_vec[queue_id] = vec;
4272 if (vec < intr_handle->nb_efd - 1)
4276 E1000_WRITE_FLUSH(hw);
4279 PMD_REGISTER_DRIVER(pmd_igb_drv);
4280 PMD_REGISTER_DRIVER(pmd_igbvf_drv);