/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_dev.h>

#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"
#include "igb_regs.h"
/*
 * Default values for port configuration
 */
#define IGB_DEFAULT_RX_FREE_THRESH  32

#define IGB_DEFAULT_RX_PTHRESH      ((hw->mac.type == e1000_i354) ? 12 : 8)
#define IGB_DEFAULT_RX_HTHRESH      8
#define IGB_DEFAULT_RX_WTHRESH      ((hw->mac.type == e1000_82576) ? 1 : 4)

#define IGB_DEFAULT_TX_PTHRESH      ((hw->mac.type == e1000_i354) ? 20 : 8)
#define IGB_DEFAULT_TX_HTHRESH      1
#define IGB_DEFAULT_TX_WTHRESH      ((hw->mac.type == e1000_82576) ? 1 : 16)

#define IGB_HKEY_MAX_INDEX 10
/* Bit shift and mask */
#define IGB_4_BIT_WIDTH  (CHAR_BIT / 2)
#define IGB_4_BIT_MASK   RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
#define IGB_8_BIT_WIDTH  CHAR_BIT
#define IGB_8_BIT_MASK   UINT8_MAX
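
/*
 * Illustrative note (not part of the original source): RTE_LEN2MASK(n, t)
 * yields the n lowest bits set, cast to type t, so the macros above expand
 * roughly to:
 *
 *   IGB_4_BIT_MASK == (uint8_t)0x0f
 *   IGB_8_BIT_MASK == 0xff
 *
 * The 4-bit mask is used, for example, when walking the RSS redirection
 * table four one-byte entries (one 32-bit register) at a time:
 *
 *   mask = (uint8_t)(reta_conf[idx].mask >> shift) & IGB_4_BIT_MASK;
 */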
/* Additional timesync values. */
#define E1000_CYCLECOUNTER_MASK      0xffffffffffffffffULL
#define E1000_ETQF_FILTER_1588       3
#define IGB_82576_TSYNC_SHIFT        16
#define E1000_INCPERIOD_82576        (1 << E1000_TIMINCA_16NS_SHIFT)
#define E1000_INCVALUE_82576         (16 << IGB_82576_TSYNC_SHIFT)
#define E1000_TSAUXC_DISABLE_SYSTIME 0x80000000

#define E1000_VTIVAR_MISC            0x01740
#define E1000_VTIVAR_MISC_MASK       0xFF
#define E1000_VTIVAR_VALID           0x80
#define E1000_VTIVAR_MISC_MAILBOX    0
#define E1000_VTIVAR_MISC_INTR_MASK  0x3

/* External VLAN Enable bit mask */
#define E1000_CTRL_EXT_EXT_VLAN      (1 << 26)

/* External VLAN Ether Type bit mask and shift */
#define E1000_VET_VET_EXT            0xFFFF0000
#define E1000_VET_VET_EXT_SHIFT      16
static int  eth_igb_configure(struct rte_eth_dev *dev);
static int  eth_igb_start(struct rte_eth_dev *dev);
static void eth_igb_stop(struct rte_eth_dev *dev);
static int  eth_igb_dev_set_link_up(struct rte_eth_dev *dev);
static int  eth_igb_dev_set_link_down(struct rte_eth_dev *dev);
static void eth_igb_close(struct rte_eth_dev *dev);
static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
static void eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
static int  eth_igb_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static void eth_igb_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *rte_stats);
static int eth_igb_xstats_get(struct rte_eth_dev *dev,
			      struct rte_eth_xstat *xstats, unsigned n);
static int eth_igb_xstats_get_names(struct rte_eth_dev *dev,
				    struct rte_eth_xstat_name *xstats_names,
				    unsigned limit);
static void eth_igb_stats_reset(struct rte_eth_dev *dev);
static void eth_igb_xstats_reset(struct rte_eth_dev *dev);
static int eth_igb_fw_version_get(struct rte_eth_dev *dev,
				  char *fw_version, size_t fw_size);
static void eth_igb_infos_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
static const uint32_t *eth_igb_supported_ptypes_get(struct rte_eth_dev *dev);
static void eth_igbvf_infos_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);
static int  eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int  eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
static int eth_igb_interrupt_action(struct rte_eth_dev *dev,
				    struct rte_intr_handle *handle);
static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
				      void *param);
static int  igb_hardware_init(struct e1000_hw *hw);
static void igb_hw_control_acquire(struct e1000_hw *hw);
static void igb_hw_control_release(struct e1000_hw *hw);
static void igb_init_manageability(struct e1000_hw *hw);
static void igb_release_manageability(struct e1000_hw *hw);

static int  eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static int eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
				 enum rte_vlan_type vlan_type,
				 uint16_t tpid);
static void eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int eth_igb_led_on(struct rte_eth_dev *dev);
static int eth_igb_led_off(struct rte_eth_dev *dev);

static void igb_intr_disable(struct e1000_hw *hw);
static int  igb_get_rx_buffer_size(struct e1000_hw *hw);
static void eth_igb_rar_set(struct rte_eth_dev *dev,
			    struct ether_addr *mac_addr,
			    uint32_t index, uint32_t pool);
static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
static void eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
					 struct ether_addr *addr);
static void igbvf_intr_disable(struct e1000_hw *hw);
static int igbvf_dev_configure(struct rte_eth_dev *dev);
static int igbvf_dev_start(struct rte_eth_dev *dev);
static void igbvf_dev_stop(struct rte_eth_dev *dev);
static void igbvf_dev_close(struct rte_eth_dev *dev);
static void igbvf_promiscuous_enable(struct rte_eth_dev *dev);
static void igbvf_promiscuous_disable(struct rte_eth_dev *dev);
static void igbvf_allmulticast_enable(struct rte_eth_dev *dev);
static void igbvf_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_igbvf_link_update(struct e1000_hw *hw);
static void eth_igbvf_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *rte_stats);
static int eth_igbvf_xstats_get(struct rte_eth_dev *dev,
				struct rte_eth_xstat *xstats, unsigned n);
static int eth_igbvf_xstats_get_names(struct rte_eth_dev *dev,
				      struct rte_eth_xstat_name *xstats_names,
				      unsigned limit);
static void eth_igbvf_stats_reset(struct rte_eth_dev *dev);
static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static void igbvf_default_mac_addr_set(struct rte_eth_dev *dev,
				       struct ether_addr *addr);
static int igbvf_get_reg_length(struct rte_eth_dev *dev);
static int igbvf_get_regs(struct rte_eth_dev *dev,
			  struct rte_dev_reg_info *regs);
static int eth_igb_rss_reta_update(struct rte_eth_dev *dev,
				   struct rte_eth_rss_reta_entry64 *reta_conf,
				   uint16_t reta_size);
static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
				  struct rte_eth_rss_reta_entry64 *reta_conf,
				  uint16_t reta_size);

static int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
				  struct rte_eth_syn_filter *filter,
				  bool add);
static int eth_igb_syn_filter_get(struct rte_eth_dev *dev,
				  struct rte_eth_syn_filter *filter);
static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
				     enum rte_filter_op filter_op,
				     void *arg);
static int igb_add_2tuple_filter(struct rte_eth_dev *dev,
				 struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_2tuple_filter(struct rte_eth_dev *dev,
				    struct rte_eth_ntuple_filter *ntuple_filter);
static int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
				       struct rte_eth_flex_filter *filter,
				       bool add);
static int eth_igb_get_flex_filter(struct rte_eth_dev *dev,
				   struct rte_eth_flex_filter *filter);
static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
				      enum rte_filter_op filter_op,
				      void *arg);
static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
				       struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
					  struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
				     struct rte_eth_ntuple_filter *filter,
				     bool add);
static int igb_get_ntuple_filter(struct rte_eth_dev *dev,
				 struct rte_eth_ntuple_filter *filter);
static int igb_ntuple_filter_handle(struct rte_eth_dev *dev,
				    enum rte_filter_op filter_op,
				    void *arg);
static int igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
					struct rte_eth_ethertype_filter *filter,
					bool add);
static int igb_ethertype_filter_handle(struct rte_eth_dev *dev,
				       enum rte_filter_op filter_op,
				       void *arg);
static int igb_get_ethertype_filter(struct rte_eth_dev *dev,
				    struct rte_eth_ethertype_filter *filter);
static int eth_igb_filter_ctrl(struct rte_eth_dev *dev,
			       enum rte_filter_type filter_type,
			       enum rte_filter_op filter_op,
			       void *arg);
static int eth_igb_get_reg_length(struct rte_eth_dev *dev);
static int eth_igb_get_regs(struct rte_eth_dev *dev,
			    struct rte_dev_reg_info *regs);
static int eth_igb_get_eeprom_length(struct rte_eth_dev *dev);
static int eth_igb_get_eeprom(struct rte_eth_dev *dev,
			      struct rte_dev_eeprom_info *eeprom);
static int eth_igb_set_eeprom(struct rte_eth_dev *dev,
			      struct rte_dev_eeprom_info *eeprom);
static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
				    struct ether_addr *mc_addr_set,
				    uint32_t nb_mc_addr);
static int igb_timesync_enable(struct rte_eth_dev *dev);
static int igb_timesync_disable(struct rte_eth_dev *dev);
static int igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
					  struct timespec *timestamp,
					  uint32_t flags);
static int igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					  struct timespec *timestamp);
static int igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
static int igb_timesync_read_time(struct rte_eth_dev *dev,
				  struct timespec *timestamp);
static int igb_timesync_write_time(struct rte_eth_dev *dev,
				   const struct timespec *timestamp);
static int eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev,
					uint16_t queue_id);
static int eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev,
					 uint16_t queue_id);
static void eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
				       uint8_t queue, uint8_t msix_vector);
static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
			       uint8_t index, uint8_t offset);
static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);
static void eth_igbvf_interrupt_handler(struct rte_intr_handle *handle,
					void *param);
static void igbvf_mbx_process(struct rte_eth_dev *dev);
/*
 * Define VF Stats MACRO for Non "cleared on read" register
 */
#define UPDATE_VF_STAT(reg, last, cur)            \
{                                                 \
	u32 latest = E1000_READ_REG(hw, reg);     \
	cur += (latest - last) & UINT_MAX;        \
	last = latest;                            \
}
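
/*
 * Illustrative example (not part of the original source): VF statistics
 * registers are 32-bit free-running counters that are not cleared on read,
 * so the macro accumulates the unsigned 32-bit delta since the previous
 * read. Because the subtraction is done modulo 2^32, a single wrap between
 * reads is handled correctly:
 *
 *   last = 0xfffffff0, latest = 0x00000010
 *   (latest - last) & UINT_MAX == 0x20, so cur grows by 32 as expected.
 *
 * The delta is only correct if the counter wraps at most once between two
 * consecutive reads.
 */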
#define IGB_FC_PAUSE_TIME 0x0680
#define IGB_LINK_UPDATE_CHECK_TIMEOUT  90  /* 9s */
#define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */

#define IGBVF_PMD_NAME "rte_igbvf_pmd"     /* PMD name */

static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;
/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_igb_map[] = {
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER_ET2) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES_QUAD) },

	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_FIBER_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575GB_QUAD_COPPER) },

	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER_DUAL) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_QUAD_FIBER) },

	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_DA4) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_OEM1) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_IT) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I211_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_BACKPLANE) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};
/*
 * The set of PCI devices this driver supports (for 82576 & I350 VF)
 */
static const struct rte_pci_id pci_id_igbvf_map[] = {
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF_HV) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF_HV) },
	{ .vendor_id = 0, /* sentinel */ },
};
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = E1000_MAX_RING_DESC,
	.nb_min = E1000_MIN_RING_DESC,
	.nb_align = IGB_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = E1000_MAX_RING_DESC,
	.nb_min = E1000_MIN_RING_DESC,
	.nb_align = IGB_RXD_ALIGN,
	.nb_seg_max = IGB_TX_MAX_SEG,
	.nb_mtu_seg_max = IGB_TX_MAX_MTU_SEG,
};
static const struct eth_dev_ops eth_igb_ops = {
	.dev_configure = eth_igb_configure,
	.dev_start = eth_igb_start,
	.dev_stop = eth_igb_stop,
	.dev_set_link_up = eth_igb_dev_set_link_up,
	.dev_set_link_down = eth_igb_dev_set_link_down,
	.dev_close = eth_igb_close,
	.promiscuous_enable = eth_igb_promiscuous_enable,
	.promiscuous_disable = eth_igb_promiscuous_disable,
	.allmulticast_enable = eth_igb_allmulticast_enable,
	.allmulticast_disable = eth_igb_allmulticast_disable,
	.link_update = eth_igb_link_update,
	.stats_get = eth_igb_stats_get,
	.xstats_get = eth_igb_xstats_get,
	.xstats_get_names = eth_igb_xstats_get_names,
	.stats_reset = eth_igb_stats_reset,
	.xstats_reset = eth_igb_xstats_reset,
	.fw_version_get = eth_igb_fw_version_get,
	.dev_infos_get = eth_igb_infos_get,
	.dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
	.mtu_set = eth_igb_mtu_set,
	.vlan_filter_set = eth_igb_vlan_filter_set,
	.vlan_tpid_set = eth_igb_vlan_tpid_set,
	.vlan_offload_set = eth_igb_vlan_offload_set,
	.rx_queue_setup = eth_igb_rx_queue_setup,
	.rx_queue_intr_enable = eth_igb_rx_queue_intr_enable,
	.rx_queue_intr_disable = eth_igb_rx_queue_intr_disable,
	.rx_queue_release = eth_igb_rx_queue_release,
	.rx_queue_count = eth_igb_rx_queue_count,
	.rx_descriptor_done = eth_igb_rx_descriptor_done,
	.rx_descriptor_status = eth_igb_rx_descriptor_status,
	.tx_descriptor_status = eth_igb_tx_descriptor_status,
	.tx_queue_setup = eth_igb_tx_queue_setup,
	.tx_queue_release = eth_igb_tx_queue_release,
	.tx_done_cleanup = eth_igb_tx_done_cleanup,
	.dev_led_on = eth_igb_led_on,
	.dev_led_off = eth_igb_led_off,
	.flow_ctrl_get = eth_igb_flow_ctrl_get,
	.flow_ctrl_set = eth_igb_flow_ctrl_set,
	.mac_addr_add = eth_igb_rar_set,
	.mac_addr_remove = eth_igb_rar_clear,
	.mac_addr_set = eth_igb_default_mac_addr_set,
	.reta_update = eth_igb_rss_reta_update,
	.reta_query = eth_igb_rss_reta_query,
	.rss_hash_update = eth_igb_rss_hash_update,
	.rss_hash_conf_get = eth_igb_rss_hash_conf_get,
	.filter_ctrl = eth_igb_filter_ctrl,
	.set_mc_addr_list = eth_igb_set_mc_addr_list,
	.rxq_info_get = igb_rxq_info_get,
	.txq_info_get = igb_txq_info_get,
	.timesync_enable = igb_timesync_enable,
	.timesync_disable = igb_timesync_disable,
	.timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = igb_timesync_read_tx_timestamp,
	.get_reg = eth_igb_get_regs,
	.get_eeprom_length = eth_igb_get_eeprom_length,
	.get_eeprom = eth_igb_get_eeprom,
	.set_eeprom = eth_igb_set_eeprom,
	.timesync_adjust_time = igb_timesync_adjust_time,
	.timesync_read_time = igb_timesync_read_time,
	.timesync_write_time = igb_timesync_write_time,
};
/*
 * dev_ops for virtual function, bare necessities for basic vf
 * operation have been implemented
 */
static const struct eth_dev_ops igbvf_eth_dev_ops = {
	.dev_configure = igbvf_dev_configure,
	.dev_start = igbvf_dev_start,
	.dev_stop = igbvf_dev_stop,
	.dev_close = igbvf_dev_close,
	.promiscuous_enable = igbvf_promiscuous_enable,
	.promiscuous_disable = igbvf_promiscuous_disable,
	.allmulticast_enable = igbvf_allmulticast_enable,
	.allmulticast_disable = igbvf_allmulticast_disable,
	.link_update = eth_igb_link_update,
	.stats_get = eth_igbvf_stats_get,
	.xstats_get = eth_igbvf_xstats_get,
	.xstats_get_names = eth_igbvf_xstats_get_names,
	.stats_reset = eth_igbvf_stats_reset,
	.xstats_reset = eth_igbvf_stats_reset,
	.vlan_filter_set = igbvf_vlan_filter_set,
	.dev_infos_get = eth_igbvf_infos_get,
	.dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
	.rx_queue_setup = eth_igb_rx_queue_setup,
	.rx_queue_release = eth_igb_rx_queue_release,
	.tx_queue_setup = eth_igb_tx_queue_setup,
	.tx_queue_release = eth_igb_tx_queue_release,
	.set_mc_addr_list = eth_igb_set_mc_addr_list,
	.rxq_info_get = igb_rxq_info_get,
	.txq_info_get = igb_txq_info_get,
	.mac_addr_set = igbvf_default_mac_addr_set,
	.get_reg = igbvf_get_regs,
};
/* store statistics names and their offsets in the stats structure */
struct rte_igb_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};
static const struct rte_igb_xstats_name_off rte_igb_stats_strings[] = {
	{"rx_crc_errors", offsetof(struct e1000_hw_stats, crcerrs)},
	{"rx_align_errors", offsetof(struct e1000_hw_stats, algnerrc)},
	{"rx_symbol_errors", offsetof(struct e1000_hw_stats, symerrs)},
	{"rx_missed_packets", offsetof(struct e1000_hw_stats, mpc)},
	{"tx_single_collision_packets", offsetof(struct e1000_hw_stats, scc)},
	{"tx_multiple_collision_packets", offsetof(struct e1000_hw_stats, mcc)},
	{"tx_excessive_collision_packets", offsetof(struct e1000_hw_stats,
		ecol)},
	{"tx_late_collisions", offsetof(struct e1000_hw_stats, latecol)},
	{"tx_total_collisions", offsetof(struct e1000_hw_stats, colc)},
	{"tx_deferred_packets", offsetof(struct e1000_hw_stats, dc)},
	{"tx_no_carrier_sense_packets", offsetof(struct e1000_hw_stats, tncrs)},
	{"rx_carrier_ext_errors", offsetof(struct e1000_hw_stats, cexterr)},
	{"rx_length_errors", offsetof(struct e1000_hw_stats, rlec)},
	{"rx_xon_packets", offsetof(struct e1000_hw_stats, xonrxc)},
	{"tx_xon_packets", offsetof(struct e1000_hw_stats, xontxc)},
	{"rx_xoff_packets", offsetof(struct e1000_hw_stats, xoffrxc)},
	{"tx_xoff_packets", offsetof(struct e1000_hw_stats, xofftxc)},
	{"rx_flow_control_unsupported_packets", offsetof(struct e1000_hw_stats,
		fcruc)},
	{"rx_size_64_packets", offsetof(struct e1000_hw_stats, prc64)},
	{"rx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, prc127)},
	{"rx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, prc255)},
	{"rx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, prc511)},
	{"rx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats,
		prc1023)},
	{"rx_size_1024_to_max_packets", offsetof(struct e1000_hw_stats,
		prc1522)},
	{"rx_broadcast_packets", offsetof(struct e1000_hw_stats, bprc)},
	{"rx_multicast_packets", offsetof(struct e1000_hw_stats, mprc)},
	{"rx_undersize_errors", offsetof(struct e1000_hw_stats, ruc)},
	{"rx_fragment_errors", offsetof(struct e1000_hw_stats, rfc)},
	{"rx_oversize_errors", offsetof(struct e1000_hw_stats, roc)},
	{"rx_jabber_errors", offsetof(struct e1000_hw_stats, rjc)},
	{"rx_management_packets", offsetof(struct e1000_hw_stats, mgprc)},
	{"rx_management_dropped", offsetof(struct e1000_hw_stats, mgpdc)},
	{"tx_management_packets", offsetof(struct e1000_hw_stats, mgptc)},
	{"rx_total_packets", offsetof(struct e1000_hw_stats, tpr)},
	{"tx_total_packets", offsetof(struct e1000_hw_stats, tpt)},
	{"rx_total_bytes", offsetof(struct e1000_hw_stats, tor)},
	{"tx_total_bytes", offsetof(struct e1000_hw_stats, tot)},
	{"tx_size_64_packets", offsetof(struct e1000_hw_stats, ptc64)},
	{"tx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, ptc127)},
	{"tx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, ptc255)},
	{"tx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, ptc511)},
	{"tx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats,
		ptc1023)},
	{"tx_size_1024_to_max_packets", offsetof(struct e1000_hw_stats,
		ptc1522)},
	{"tx_multicast_packets", offsetof(struct e1000_hw_stats, mptc)},
	{"tx_broadcast_packets", offsetof(struct e1000_hw_stats, bptc)},
	{"tx_tso_packets", offsetof(struct e1000_hw_stats, tsctc)},
	{"tx_tso_errors", offsetof(struct e1000_hw_stats, tsctfc)},
	{"rx_sent_to_host_packets", offsetof(struct e1000_hw_stats, rpthc)},
	{"tx_sent_by_host_packets", offsetof(struct e1000_hw_stats, hgptc)},
	{"rx_code_violation_packets", offsetof(struct e1000_hw_stats, scvpc)},

	{"interrupt_assert_count", offsetof(struct e1000_hw_stats, iac)},
};

#define IGB_NB_XSTATS (sizeof(rte_igb_stats_strings) / \
		sizeof(rte_igb_stats_strings[0]))
static const struct rte_igb_xstats_name_off rte_igbvf_stats_strings[] = {
	{"rx_multicast_packets", offsetof(struct e1000_vf_stats, mprc)},
	{"rx_good_loopback_packets", offsetof(struct e1000_vf_stats, gprlbc)},
	{"tx_good_loopback_packets", offsetof(struct e1000_vf_stats, gptlbc)},
	{"rx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gorlbc)},
	{"tx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gotlbc)},
};

#define IGBVF_NB_XSTATS (sizeof(rte_igbvf_stats_strings) / \
		sizeof(rte_igbvf_stats_strings[0]))
/**
 * Atomically reads the link status information from the global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to read from.
 * @param link
 *   - Pointer to the buffer to be filled with the link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_igb_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
/**
 * Atomically writes the link status information into the global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to write to.
 * @param link
 *   - Pointer to the buffer holding the link status to be written.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
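
/*
 * Illustrative usage sketch (not part of the original source): callers such
 * as a link_update handler snapshot the old state, compute the new one, and
 * publish it atomically:
 *
 *   struct rte_eth_link old, new;
 *
 *   memset(&old, 0, sizeof(old));
 *   rte_igb_dev_atomic_read_link_status(dev, &old);
 *   ... fill in 'new' from PHY/MAC registers ...
 *   rte_igb_dev_atomic_write_link_status(dev, &new);
 *
 * The cmpset-based helpers rely on struct rte_eth_link fitting into
 * 64 bits, so the whole status can be swapped in a single atomic operation.
 */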
static inline void
igb_intr_enable(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
	E1000_WRITE_FLUSH(hw);
}

static void
igb_intr_disable(struct e1000_hw *hw)
{
	E1000_WRITE_REG(hw, E1000_IMC, ~0);
	E1000_WRITE_FLUSH(hw);
}
static inline void
igbvf_intr_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* only for mailbox */
	E1000_WRITE_REG(hw, E1000_EIAM, 1 << E1000_VTIVAR_MISC_MAILBOX);
	E1000_WRITE_REG(hw, E1000_EIAC, 1 << E1000_VTIVAR_MISC_MAILBOX);
	E1000_WRITE_REG(hw, E1000_EIMS, 1 << E1000_VTIVAR_MISC_MAILBOX);
	E1000_WRITE_FLUSH(hw);
}

/* only for mailbox now. If RX/TX needed, should extend this function. */
static void
igbvf_set_ivar_map(struct e1000_hw *hw, uint8_t msix_vector)
{
	uint32_t tmp = 0;

	/* mailbox */
	tmp |= (msix_vector & E1000_VTIVAR_MISC_INTR_MASK);
	tmp |= E1000_VTIVAR_VALID;
	E1000_WRITE_REG(hw, E1000_VTIVAR_MISC, tmp);
}

static void
eth_igbvf_configure_msix_intr(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Configure VF other cause ivar */
	igbvf_set_ivar_map(hw, E1000_VTIVAR_MISC_MAILBOX);
}
static inline int32_t
igb_pf_reset_hw(struct e1000_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = e1000_reset_hw(hw);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	return status;
}
static void
igb_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;

	e1000_set_mac_type(hw);

	/* need to check if it is a vf device below */
}
static int
igb_reset_swfw_lock(struct e1000_hw *hw)
{
	int ret_val;

	/*
	 * Do mac ops initialization manually here, since we will need
	 * some function pointers set by this call.
	 */
	ret_val = e1000_init_mac_params(hw);
	if (ret_val)
		return ret_val;

	/*
	 * SMBI lock should not fail in this early stage. If this is the case,
	 * it is due to an improper exit of the application.
	 * So force the release of the faulty lock.
	 */
	if (e1000_get_hw_semaphore_generic(hw) < 0) {
		PMD_DRV_LOG(DEBUG, "SMBI lock released");
	}
	e1000_put_hw_semaphore_generic(hw);

	if (hw->mac.ops.acquire_swfw_sync != NULL) {
		uint16_t mask;

		/*
		 * Phy lock should not fail in this early stage. If this is
		 * the case, it is due to an improper exit of the application.
		 * So force the release of the faulty lock.
		 */
		mask = E1000_SWFW_PHY0_SM << hw->bus.func;
		if (hw->bus.func > E1000_FUNC_1)
			mask <<= 2;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
				    hw->bus.func);
		}
		hw->mac.ops.release_swfw_sync(hw, mask);

		/*
		 * This one is more tricky since it is common to all ports; but
		 * swfw_sync retries last long enough (1s) to be almost sure
		 * that if the lock can not be taken it is due to an improper
		 * lock of the semaphore.
		 */
		mask = E1000_SWFW_EEP_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW common locks released");
		}
		hw->mac.ops.release_swfw_sync(hw, mask);
	}

	return E1000_SUCCESS;
}
static int
eth_igb_dev_init(struct rte_eth_dev *eth_dev)
{
	int error = 0;
	struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(eth_dev);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);

	uint32_t ctrl_ext;

	eth_dev->dev_ops = &eth_igb_ops;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
	eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	igb_identify_hardware(eth_dev, pci_dev);
	if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	e1000_get_bus_info(hw);

	/* Reset any pending lock */
	if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	/* Finish initialization */
	if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	hw->mac.autoneg = 1;
	hw->phy.autoneg_wait_to_complete = 0;
	hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = 0; /* AUTO_ALL_MODES */
		hw->phy.disable_polarity_correction = 0;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	/*
	 * Start from a known state, this is important in reading the nvm
	 * and mac from that.
	 */
	igb_pf_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time it is a real issue.
		 */
		if (e1000_validate_nvm_checksum(hw) < 0) {
			PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
			error = -EIO;
			goto err_late;
		}
	}

	/* Read the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(hw) != 0) {
		PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
		error = -EIO;
		goto err_late;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("e1000",
		ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
			     "store MAC addresses",
			     ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		error = -ENOMEM;
		goto err_late;
	}

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)hw->mac.addr,
			&eth_dev->data->mac_addrs[0]);

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* Now initialize the hardware */
	if (igb_hardware_init(hw) != 0) {
		PMD_INIT_LOG(ERR, "Hardware initialization failed");
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		error = -ENODEV;
		goto err_late;
	}
	hw->mac.get_link_status = 1;
	adapter->stopped = 0;

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(hw) < 0) {
		PMD_INIT_LOG(ERR, "PHY reset is blocked due to "
			     "SOL/IDER session");
	}

	/* initialize PF if max_vfs not zero */
	igb_pf_host_init(eth_dev);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(&pci_dev->intr_handle,
				   eth_igb_interrupt_handler,
				   (void *)eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pci_dev->intr_handle);

	/* enable support intr */
	igb_intr_enable(eth_dev);

	TAILQ_INIT(&filter_info->flex_list);
	filter_info->flex_mask = 0;
	TAILQ_INIT(&filter_info->twotuple_list);
	filter_info->twotuple_mask = 0;
	TAILQ_INIT(&filter_info->fivetuple_list);
	filter_info->fivetuple_mask = 0;

	return 0;

err_late:
	igb_hw_control_release(hw);

	return error;
}
static int
eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct e1000_hw *hw;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	hw = E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	pci_dev = E1000_DEV_TO_PCI(eth_dev);
	intr_handle = &pci_dev->intr_handle;

	if (adapter->stopped == 0)
		eth_igb_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* Reset any pending lock */
	igb_reset_swfw_lock(hw);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	/* uninitialize PF if max_vfs not zero */
	igb_pf_host_uninit(eth_dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     eth_igb_interrupt_handler, eth_dev);

	return 0;
}
/*
 * Virtual Function device init
 */
static int
eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int diag;
	struct ether_addr *perm_addr = (struct ether_addr *)hw->mac.perm_addr;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &igbvf_eth_dev_ops;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
	eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
		return 0;
	}

	pci_dev = E1000_DEV_TO_PCI(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	adapter->stopped = 0;

	/* Initialize the shared code (base driver) */
	diag = e1000_setup_init_funcs(hw, TRUE);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d",
			     diag);
		return -EIO;
	}

	/* init_mailbox_params */
	hw->mbx.ops.init_params(hw);

	/* Disable the interrupts for VF */
	igbvf_intr_disable(hw);

	diag = hw->mac.ops.reset_hw(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN *
		hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC "
			     "addresses",
			     ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		return -ENOMEM;
	}

	/* Generate a random MAC address, if none was assigned by PF. */
	if (is_zero_ether_addr(perm_addr)) {
		eth_random_addr(perm_addr->addr_bytes);
		diag = e1000_rar_set(hw, perm_addr->addr_bytes, 0);
		if (diag) {
			rte_free(eth_dev->data->mac_addrs);
			eth_dev->data->mac_addrs = NULL;
			return diag;
		}
		PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
		PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
			     "%02x:%02x:%02x:%02x:%02x:%02x",
			     perm_addr->addr_bytes[0],
			     perm_addr->addr_bytes[1],
			     perm_addr->addr_bytes[2],
			     perm_addr->addr_bytes[3],
			     perm_addr->addr_bytes[4],
			     perm_addr->addr_bytes[5]);
	}

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x "
		     "mac.type=%s",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id, "igb_mac_82576_vf");

	intr_handle = &pci_dev->intr_handle;
	rte_intr_callback_register(intr_handle,
				   eth_igbvf_interrupt_handler, eth_dev);

	return 0;
}
static int
eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
	struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(eth_dev);

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	if (adapter->stopped == 0)
		igbvf_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(&pci_dev->intr_handle);
	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     eth_igbvf_interrupt_handler,
				     (void *)eth_dev);

	return 0;
}
static struct eth_driver rte_igb_pmd = {
	.pci_drv = {
		.id_table = pci_id_igb_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = eth_igb_dev_init,
	.eth_dev_uninit = eth_igb_dev_uninit,
	.dev_private_size = sizeof(struct e1000_adapter),
};

/*
 * virtual function driver struct
 */
static struct eth_driver rte_igbvf_pmd = {
	.pci_drv = {
		.id_table = pci_id_igbvf_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = eth_igbvf_dev_init,
	.eth_dev_uninit = eth_igbvf_dev_uninit,
	.dev_private_size = sizeof(struct e1000_adapter),
};
static void
igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* RCTL: enable VLAN filter since VMDq always use VLAN filter */
	uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);

	rctl |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}
static int
igb_check_mq_mode(struct rte_eth_dev *dev)
{
	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
	uint16_t nb_rx_q = dev->data->nb_rx_queues;
	uint16_t nb_tx_q = dev->data->nb_tx_queues;

	if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) ||
	    tx_mq_mode == ETH_MQ_TX_DCB ||
	    tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) {
		PMD_INIT_LOG(ERR, "DCB mode is not supported.");
		return -EINVAL;
	}
	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		/* Check multi-queue mode.
		 * To not break software, we accept ETH_MQ_RX_NONE as this
		 * might be used to turn off VLAN filter.
		 */

		if (rx_mq_mode == ETH_MQ_RX_NONE ||
		    rx_mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
			RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
		} else {
			/* Only support one queue on VFs.
			 * RSS together with SRIOV is not supported.
			 */
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" wrong mq_mode rx %d.",
					rx_mq_mode);
			return -EINVAL;
		}
		/* TX mode is not used here, so the mode might be ignored.*/
		if (tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
			/* SRIOV only works in VMDq enable mode */
			PMD_INIT_LOG(WARNING, "SRIOV is active,"
					" TX mode %d is not supported. "
					" Driver will behave as %d mode.",
					tx_mq_mode, ETH_MQ_TX_VMDQ_ONLY);
		}

		/* check valid queue number */
		if ((nb_rx_q > 1) || (nb_tx_q > 1)) {
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" only support one queue on VFs.");
			return -EINVAL;
		}
	} else {
		/* To not break software that sets an invalid mode, only
		 * display a warning if an invalid mode is used.
		 */
		if (rx_mq_mode != ETH_MQ_RX_NONE &&
		    rx_mq_mode != ETH_MQ_RX_VMDQ_ONLY &&
		    rx_mq_mode != ETH_MQ_RX_RSS) {
			/* RSS together with VMDq not supported*/
			PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
				     rx_mq_mode);
			return -EINVAL;
		}

		if (tx_mq_mode != ETH_MQ_TX_NONE &&
		    tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
			PMD_INIT_LOG(WARNING, "TX mode %d is not supported."
					" txmode is meaningless in this"
					" driver, so it is ignored.",
					tx_mq_mode);
		}
	}
	return 0;
}
static int
eth_igb_configure(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* multiple queue mode checking */
	ret = igb_check_mq_mode(dev);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "igb_check_mq_mode fails with %d.",
			    ret);
		return ret;
	}

	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
	PMD_INIT_FUNC_TRACE();

	return 0;
}
static int
eth_igb_start(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);
	struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int ret, mask;
	uint32_t intr_vector = 0;
	uint32_t ctrl_ext;
	uint32_t *speeds;
	int num_speeds;
	bool autoneg;

	PMD_INIT_FUNC_TRACE();

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* Power up the phy. Needed to make the link go Up */
	eth_igb_dev_set_link_up(dev);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 */
	if (hw->mac.type == e1000_82575) {
		uint32_t pba;

		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		E1000_WRITE_REG(hw, E1000_PBA, pba);
	}

	/* Put the address into the Receive Address Array */
	e1000_rar_set(hw, hw->mac.addr, 0);

	/* Initialize the hardware */
	if (igb_hardware_init(hw)) {
		PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
		return -EIO;
	}
	adapter->stopped = 0;

	E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	/* configure PF module if SRIOV enabled */
	igb_pf_host_configure(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure MSI-X for Rx interrupt */
	eth_igb_configure_msix_intr(dev);

	/* Configure for OS presence */
	igb_init_manageability(hw);

	eth_igb_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	ret = eth_igb_rx_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		igb_dev_clear_queues(dev);
		return ret;
	}

	e1000_clear_hw_cntrs_base_generic(hw);

	/*
	 * VLAN Offload Settings
	 */
	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
		ETH_VLAN_EXTEND_MASK;
	eth_igb_vlan_offload_set(dev, mask);

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
		/* Enable VLAN filter since VMDq always use VLAN filter */
		igb_vmdq_vlan_hw_filter_enable(dev);
	}

	if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
	    (hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210) ||
	    (hw->mac.type == e1000_i211)) {
		/* Configure EITR with the maximum possible value (0xFFFF) */
		E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
	}

	/* Setup link speed and duplex */
	speeds = &dev->data->dev_conf.link_speeds;
	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
		hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
		hw->mac.autoneg = 1;
	} else {
		num_speeds = 0;
		autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;

		/* Reset */
		hw->phy.autoneg_advertised = 0;

		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
			num_speeds = -1;
			goto error_invalid_config;
		}
		if (*speeds & ETH_LINK_SPEED_10M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_10M) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_1G) {
			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
			num_speeds++;
		}
		if (num_speeds == 0 || (!autoneg && (num_speeds > 1)))
			goto error_invalid_config;

		/* Set/reset the mac.autoneg based on the link speed,
		 * fixed or not
		 */
		if (!autoneg) {
			hw->mac.autoneg = 0;
			hw->mac.forced_speed_duplex =
					hw->phy.autoneg_advertised;
		} else {
			hw->mac.autoneg = 1;
		}
	}

	e1000_setup_link(hw);

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			eth_igb_lsc_interrupt_setup(dev);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     eth_igb_interrupt_handler,
					     (void *)dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		eth_igb_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	igb_intr_enable(dev);

	PMD_INIT_LOG(DEBUG, "<<");

	return 0;

error_invalid_config:
	PMD_INIT_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
		     dev->data->dev_conf.link_speeds, dev->data->port_id);
	igb_dev_clear_queues(dev);
	return -EINVAL;
}
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 *
 **********************************************************************/
static void
eth_igb_stop(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
	struct rte_eth_link link;
	struct e1000_flex_filter *p_flex;
	struct e1000_5tuple_filter *p_5tuple, *p_5tuple_next;
	struct e1000_2tuple_filter *p_2tuple, *p_2tuple_next;
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	igb_intr_disable(hw);

	/* disable intr eventfd mapping */
	rte_intr_disable(intr_handle);

	igb_pf_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	/* Set bit for Go Link disconnect */
	if (hw->mac.type >= e1000_82580) {
		uint32_t phpm_reg;

		phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
		phpm_reg |= E1000_82580_PM_GO_LINKD;
		E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
	}

	/* Power down the phy. Needed to make the link go Down */
	eth_igb_dev_set_link_down(dev);

	igb_dev_clear_queues(dev);

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_igb_dev_atomic_write_link_status(dev, &link);

	/* Remove all flex filters of the device */
	while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) {
		TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries);
		rte_free(p_flex);
	}
	filter_info->flex_mask = 0;

	/* Remove all ntuple filters of the device */
	for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
	     p_5tuple != NULL; p_5tuple = p_5tuple_next) {
		p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
		TAILQ_REMOVE(&filter_info->fivetuple_list,
			     p_5tuple, entries);
		rte_free(p_5tuple);
	}
	filter_info->fivetuple_mask = 0;
	for (p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list);
	     p_2tuple != NULL; p_2tuple = p_2tuple_next) {
		p_2tuple_next = TAILQ_NEXT(p_2tuple, entries);
		TAILQ_REMOVE(&filter_info->twotuple_list,
			     p_2tuple, entries);
		rte_free(p_2tuple);
	}
	filter_info->twotuple_mask = 0;

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   eth_igb_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}
static int
eth_igb_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->phy.media_type == e1000_media_type_copper)
		e1000_power_up_phy(hw);
	else
		e1000_power_up_fiber_serdes_link(hw);

	return 0;
}

static int
eth_igb_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->phy.media_type == e1000_media_type_copper)
		e1000_power_down_phy(hw);
	else
		e1000_shutdown_fiber_serdes_link(hw);

	return 0;
}
static void
eth_igb_close(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);
	struct rte_eth_link link;
	struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	eth_igb_stop(dev);
	adapter->stopped = 1;

	e1000_phy_hw_reset(hw);
	igb_release_manageability(hw);
	igb_hw_control_release(hw);

	/* Clear bit for Go Link disconnect */
	if (hw->mac.type >= e1000_82580) {
		uint32_t phpm_reg;

		phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
		phpm_reg &= ~E1000_82580_PM_GO_LINKD;
		E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
	}

	igb_dev_free_queues(dev);

	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	memset(&link, 0, sizeof(link));
	rte_igb_dev_atomic_write_link_status(dev, &link);
}
static int
igb_get_rx_buffer_size(struct e1000_hw *hw)
{
	uint32_t rx_buf_size;

	if (hw->mac.type == e1000_82576) {
		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10;
	} else if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i350) {
		/* PBS needs to be translated according to a lookup table */
		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf);
		rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size);
		rx_buf_size = (rx_buf_size << 10);
	} else if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10;
	} else {
		rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10;
	}

	return rx_buf_size;
}
/*********************************************************************
 *
 *  Initialize the hardware
 *
 **********************************************************************/
static int
igb_hardware_init(struct e1000_hw *hw)
{
	uint32_t rx_buf_size;
	int diag;

	/* Let the firmware know the OS is in control */
	igb_hw_control_acquire(hw);

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two standard size (1518)
	 *   frames to be received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	rx_buf_size = igb_get_rx_buffer_size(hw);

	hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2);
	hw->fc.low_water = hw->fc.high_water - 1500;
	hw->fc.pause_time = IGB_FC_PAUSE_TIME;
	hw->fc.send_xon = 1;

	/* Set Flow control, use the tunable location if sane */
	if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4))
		hw->fc.requested_mode = igb_fc_setting;
	else
		hw->fc.requested_mode = e1000_fc_none;

	/* Issue a global reset */
	igb_pf_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	diag = e1000_init_hw(hw);
	if (diag < 0)
		return diag;

	E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
	e1000_get_phy_info(hw);
	e1000_check_for_link(hw);

	return 0;
}
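
/*
 * Worked example (not part of the original source): on an 82575 with a
 * 32 KB Rx packet buffer, igb_get_rx_buffer_size() returns 32768, so
 *
 *   high_water = 32768 - 2 * 1518 = 29732 bytes
 *   low_water  = 29732 - 1500    = 28232 bytes
 *
 * leaving room for two maximum-size (1518 byte) frames to arrive after an
 * XOFF is sent. Note that IGB_FC_PAUSE_TIME (0x0680 = 1664 pause quanta of
 * 512 bit times each) actually advertises a pause of roughly 852 us at
 * 1 Gb/s, somewhat more than the "1000 x 512ns" the comment above suggests.
 */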
1653 /* This function is based on igb_update_stats_counters() in igb/if_igb.c */
1655 igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats)
1659 uint64_t old_gprc = stats->gprc;
1660 uint64_t old_gptc = stats->gptc;
1661 uint64_t old_tpr = stats->tpr;
1662 uint64_t old_tpt = stats->tpt;
1663 uint64_t old_rpthc = stats->rpthc;
1664 uint64_t old_hgptc = stats->hgptc;
1666 if(hw->phy.media_type == e1000_media_type_copper ||
1667 (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
1669 E1000_READ_REG(hw,E1000_SYMERRS);
1670 stats->sec += E1000_READ_REG(hw, E1000_SEC);
1673 stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
1674 stats->mpc += E1000_READ_REG(hw, E1000_MPC);
1675 stats->scc += E1000_READ_REG(hw, E1000_SCC);
1676 stats->ecol += E1000_READ_REG(hw, E1000_ECOL);
1678 stats->mcc += E1000_READ_REG(hw, E1000_MCC);
1679 stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
1680 stats->colc += E1000_READ_REG(hw, E1000_COLC);
1681 stats->dc += E1000_READ_REG(hw, E1000_DC);
1682 stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
1683 stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
1684 stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
1686 ** For watchdog management we need to know if we have been
1687 ** paused during the last interval, so capture that here.
1689 pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
1690 stats->xoffrxc += pause_frames;
1691 stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
1692 stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
1693 stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
1694 stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
1695 stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
1696 stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
1697 stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
1698 stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
1699 stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
1700 stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
1701 stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
1702 stats->gptc += E1000_READ_REG(hw, E1000_GPTC);
1704 /* For the 64-bit byte counters the low dword must be read first. */
1705 /* Both registers clear on the read of the high dword */
1707 /* Workaround CRC bytes included in size, take away 4 bytes/packet */
1708 stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
1709 stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
1710 stats->gorc -= (stats->gprc - old_gprc) * ETHER_CRC_LEN;
1711 stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
1712 stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);
1713 stats->gotc -= (stats->gptc - old_gptc) * ETHER_CRC_LEN;
1715 stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
1716 stats->ruc += E1000_READ_REG(hw, E1000_RUC);
1717 stats->rfc += E1000_READ_REG(hw, E1000_RFC);
1718 stats->roc += E1000_READ_REG(hw, E1000_ROC);
1719 stats->rjc += E1000_READ_REG(hw, E1000_RJC);
1721 stats->tpr += E1000_READ_REG(hw, E1000_TPR);
1722 stats->tpt += E1000_READ_REG(hw, E1000_TPT);
1724 stats->tor += E1000_READ_REG(hw, E1000_TORL);
1725 stats->tor += ((uint64_t)E1000_READ_REG(hw, E1000_TORH) << 32);
1726 stats->tor -= (stats->tpr - old_tpr) * ETHER_CRC_LEN;
1727 stats->tot += E1000_READ_REG(hw, E1000_TOTL);
1728 stats->tot += ((uint64_t)E1000_READ_REG(hw, E1000_TOTH) << 32);
1729 stats->tot -= (stats->tpt - old_tpt) * ETHER_CRC_LEN;
1731 stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
1732 stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
1733 stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
1734 stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
1735 stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
1736 stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
1737 stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
1738 stats->bptc += E1000_READ_REG(hw, E1000_BPTC);
1740 /* Interrupt Counts */
1742 stats->iac += E1000_READ_REG(hw, E1000_IAC);
1743 stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
1744 stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
1745 stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
1746 stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
1747 stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
1748 stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
1749 stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
1750 stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
1752 /* Host to Card Statistics */
1754 stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
1755 stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
1756 stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
1757 stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
1758 stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
1759 stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
1760 stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
1761 stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL);
1762 stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32);
1763 stats->hgorc -= (stats->rpthc - old_rpthc) * ETHER_CRC_LEN;
1764 stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL);
1765 stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32);
1766 stats->hgotc -= (stats->hgptc - old_hgptc) * ETHER_CRC_LEN;
1767 stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
1768 stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
1769 stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);
1771 stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
1772 stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
1773 stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
1774 stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
1775 stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
1776 stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
1777 }
1779 static void
1780 eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
1781 {
1782 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1783 struct e1000_hw_stats *stats =
1784 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1786 igb_read_stats_registers(hw, stats);
1788 if (rte_stats == NULL)
1789 return;
1791 /* Rx Errors */
1792 rte_stats->imissed = stats->mpc;
1793 rte_stats->ierrors = stats->crcerrs +
1794 stats->rlec + stats->ruc + stats->roc +
1795 stats->rxerrc + stats->algnerrc + stats->cexterr;
1798 rte_stats->oerrors = stats->ecol + stats->latecol;
1800 rte_stats->ipackets = stats->gprc;
1801 rte_stats->opackets = stats->gptc;
1802 rte_stats->ibytes = stats->gorc;
1803 rte_stats->obytes = stats->gotc;
1807 eth_igb_stats_reset(struct rte_eth_dev *dev)
1809 struct e1000_hw_stats *hw_stats =
1810 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1812 /* HW registers are cleared on read */
1813 eth_igb_stats_get(dev, NULL);
1815 /* Reset software totals */
1816 memset(hw_stats, 0, sizeof(*hw_stats));
1820 eth_igb_xstats_reset(struct rte_eth_dev *dev)
1822 struct e1000_hw_stats *stats =
1823 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1825 /* HW registers are cleared on read */
1826 eth_igb_xstats_get(dev, NULL, IGB_NB_XSTATS);
1828 /* Reset software totals */
1829 memset(stats, 0, sizeof(*stats));
1832 static int eth_igb_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1833 struct rte_eth_xstat_name *xstats_names,
1834 __rte_unused unsigned limit)
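/*
 * Per the ethdev xstats convention, a NULL names array is a size
 * query: return the number of supported xstats without writing
 * any names.
 */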
1838 if (xstats_names == NULL)
1839 return IGB_NB_XSTATS;
1841 /* Note: limit checked in rte_eth_xstats_names() */
1843 for (i = 0; i < IGB_NB_XSTATS; i++) {
1844 snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
1845 "%s", rte_igb_stats_strings[i].name);
1848 return IGB_NB_XSTATS;
1852 eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1855 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1856 struct e1000_hw_stats *hw_stats =
1857 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
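/*
 * If the caller's array is too small, return the required number of
 * entries instead of writing anything; the caller can then retry
 * with a large-enough array.
 */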
1860 if (n < IGB_NB_XSTATS)
1861 return IGB_NB_XSTATS;
1863 igb_read_stats_registers(hw, hw_stats);
1865 /* If this is a reset xstats is NULL, and we have cleared the
1866 * registers by reading them.
1867 */
1868 if (!xstats)
1869 return 0;
1871 /* Extended stats */
1872 for (i = 0; i < IGB_NB_XSTATS; i++) {
1873 xstats[i].id = i;
1874 xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
1875 rte_igb_stats_strings[i].offset);
1876 }
1878 return IGB_NB_XSTATS;
1882 igbvf_read_stats_registers(struct e1000_hw *hw, struct e1000_vf_stats *hw_stats)
1884 /* Good Rx packets, include VF loopback */
1885 UPDATE_VF_STAT(E1000_VFGPRC,
1886 hw_stats->last_gprc, hw_stats->gprc);
1888 /* Good Rx octets, include VF loopback */
1889 UPDATE_VF_STAT(E1000_VFGORC,
1890 hw_stats->last_gorc, hw_stats->gorc);
1892 /* Good Tx packets, include VF loopback */
1893 UPDATE_VF_STAT(E1000_VFGPTC,
1894 hw_stats->last_gptc, hw_stats->gptc);
1896 /* Good Tx octets, include VF loopback */
1897 UPDATE_VF_STAT(E1000_VFGOTC,
1898 hw_stats->last_gotc, hw_stats->gotc);
1900 /* Rx Multicast packets */
1901 UPDATE_VF_STAT(E1000_VFMPRC,
1902 hw_stats->last_mprc, hw_stats->mprc);
1904 /* Good Rx loopback packets */
1905 UPDATE_VF_STAT(E1000_VFGPRLBC,
1906 hw_stats->last_gprlbc, hw_stats->gprlbc);
1908 /* Good Rx loopback octets */
1909 UPDATE_VF_STAT(E1000_VFGORLBC,
1910 hw_stats->last_gorlbc, hw_stats->gorlbc);
1912 /* Good Tx loopback packets */
1913 UPDATE_VF_STAT(E1000_VFGPTLBC,
1914 hw_stats->last_gptlbc, hw_stats->gptlbc);
1916 /* Good Tx loopback octets */
1917 UPDATE_VF_STAT(E1000_VFGOTLBC,
1918 hw_stats->last_gotlbc, hw_stats->gotlbc);
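/*
 * Note: the VF statistics registers are 32-bit and not clear-on-read;
 * UPDATE_VF_STAT() accumulates the delta between the latest and the
 * last-seen register value (modulo 2^32) into the 64-bit software
 * counter, so wraparound is handled correctly.
 */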
1921 static int eth_igbvf_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1922 struct rte_eth_xstat_name *xstats_names,
1923 __rte_unused unsigned limit)
1927 if (xstats_names != NULL)
1928 for (i = 0; i < IGBVF_NB_XSTATS; i++) {
1929 snprintf(xstats_names[i].name,
1930 sizeof(xstats_names[i].name), "%s",
1931 rte_igbvf_stats_strings[i].name);
1932 }
1933 return IGBVF_NB_XSTATS;
1937 eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1940 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1941 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
1942 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1945 if (n < IGBVF_NB_XSTATS)
1946 return IGBVF_NB_XSTATS;
1948 igbvf_read_stats_registers(hw, hw_stats);
1950 if (!xstats)
1951 return 0;
1953 for (i = 0; i < IGBVF_NB_XSTATS; i++) {
1954 xstats[i].id = i;
1955 xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
1956 rte_igbvf_stats_strings[i].offset);
1957 }
1959 return IGBVF_NB_XSTATS;
1963 eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
1965 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1966 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
1967 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1969 igbvf_read_stats_registers(hw, hw_stats);
1971 if (rte_stats == NULL)
1972 return;
1974 rte_stats->ipackets = hw_stats->gprc;
1975 rte_stats->ibytes = hw_stats->gorc;
1976 rte_stats->opackets = hw_stats->gptc;
1977 rte_stats->obytes = hw_stats->gotc;
1981 eth_igbvf_stats_reset(struct rte_eth_dev *dev)
1983 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*)
1984 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1986 /* Sync HW register to the last stats */
1987 eth_igbvf_stats_get(dev, NULL);
1989 /* reset HW current stats */
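/*
 * Only the fields from gprc onward are zeroed; the snapshot fields
 * laid out before gprc keep the last hardware reading, so subsequent
 * delta accumulation stays consistent.
 */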
1990 memset(&hw_stats->gprc, 0, sizeof(*hw_stats) -
1991 offsetof(struct e1000_vf_stats, gprc));
1995 eth_igb_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
1998 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1999 struct e1000_fw_version fw;
2002 e1000_get_fw_version(hw, &fw);
2004 switch (hw->mac.type) {
2005 case e1000_i210:
2006 case e1000_i211:
2007 if (!(e1000_get_flash_presence_i210(hw))) {
2008 ret = snprintf(fw_version, fw_size,
2009 "%2d.%2d-%2d",
2010 fw.invm_major, fw.invm_minor,
2011 fw.invm_img_type);
2012 break;
2013 }
2014 /* falls through */
2015 default:
2016 /* if option rom is valid, display its version too */
2017 if (fw.or_valid) {
2018 ret = snprintf(fw_version, fw_size,
2019 "%d.%d, 0x%08x, %d.%d.%d",
2020 fw.eep_major, fw.eep_minor, fw.etrack_id,
2021 fw.or_major, fw.or_build, fw.or_patch);
2022 /* no option rom */
2023 } else {
2024 if (fw.etrack_id != 0x0000) {
2025 ret = snprintf(fw_version, fw_size,
2026 "%d.%d, 0x%08x",
2027 fw.eep_major, fw.eep_minor,
2028 fw.etrack_id);
2029 } else {
2030 ret = snprintf(fw_version, fw_size,
2031 "%d.%d.%d",
2032 fw.eep_major, fw.eep_minor,
2033 fw.eep_build);
2034 }
2035 }
2036 break;
2037 }
2039 ret += 1; /* add the size of '\0' */
2040 if (fw_size < (u32)ret)
2041 return ret;
2042 else
2043 return 0;
2044 }
2047 eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2049 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2051 dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
2052 dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
2053 dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
2054 dev_info->max_mac_addrs = hw->mac.rar_entry_count;
2055 dev_info->rx_offload_capa =
2056 DEV_RX_OFFLOAD_VLAN_STRIP |
2057 DEV_RX_OFFLOAD_IPV4_CKSUM |
2058 DEV_RX_OFFLOAD_UDP_CKSUM |
2059 DEV_RX_OFFLOAD_TCP_CKSUM;
2060 dev_info->tx_offload_capa =
2061 DEV_TX_OFFLOAD_VLAN_INSERT |
2062 DEV_TX_OFFLOAD_IPV4_CKSUM |
2063 DEV_TX_OFFLOAD_UDP_CKSUM |
2064 DEV_TX_OFFLOAD_TCP_CKSUM |
2065 DEV_TX_OFFLOAD_SCTP_CKSUM |
2066 DEV_TX_OFFLOAD_TCP_TSO;
2068 switch (hw->mac.type) {
2069 case e1000_82575:
2070 dev_info->max_rx_queues = 4;
2071 dev_info->max_tx_queues = 4;
2072 dev_info->max_vmdq_pools = 0;
2073 break;
2075 case e1000_82576:
2076 dev_info->max_rx_queues = 16;
2077 dev_info->max_tx_queues = 16;
2078 dev_info->max_vmdq_pools = ETH_8_POOLS;
2079 dev_info->vmdq_queue_num = 16;
2080 break;
2082 case e1000_82580:
2083 dev_info->max_rx_queues = 8;
2084 dev_info->max_tx_queues = 8;
2085 dev_info->max_vmdq_pools = ETH_8_POOLS;
2086 dev_info->vmdq_queue_num = 8;
2087 break;
2089 case e1000_i350:
2090 dev_info->max_rx_queues = 8;
2091 dev_info->max_tx_queues = 8;
2092 dev_info->max_vmdq_pools = ETH_8_POOLS;
2093 dev_info->vmdq_queue_num = 8;
2094 break;
2096 case e1000_i354:
2097 dev_info->max_rx_queues = 8;
2098 dev_info->max_tx_queues = 8;
2099 break;
2101 case e1000_i210:
2102 dev_info->max_rx_queues = 4;
2103 dev_info->max_tx_queues = 4;
2104 dev_info->max_vmdq_pools = 0;
2105 break;
2107 case e1000_i211:
2108 dev_info->max_rx_queues = 2;
2109 dev_info->max_tx_queues = 2;
2110 dev_info->max_vmdq_pools = 0;
2111 break;
2113 default:
2114 /* Should not happen */
2115 break;
2116 }
2117 dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t);
2118 dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
2119 dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL;
2121 dev_info->default_rxconf = (struct rte_eth_rxconf) {
2122 .rx_thresh = {
2123 .pthresh = IGB_DEFAULT_RX_PTHRESH,
2124 .hthresh = IGB_DEFAULT_RX_HTHRESH,
2125 .wthresh = IGB_DEFAULT_RX_WTHRESH,
2126 },
2127 .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
2128 .rx_drop_en = 0,
2129 };
2131 dev_info->default_txconf = (struct rte_eth_txconf) {
2132 .tx_thresh = {
2133 .pthresh = IGB_DEFAULT_TX_PTHRESH,
2134 .hthresh = IGB_DEFAULT_TX_HTHRESH,
2135 .wthresh = IGB_DEFAULT_TX_WTHRESH,
2136 },
2137 .txq_flags = 0,
2138 };
2140 dev_info->rx_desc_lim = rx_desc_lim;
2141 dev_info->tx_desc_lim = tx_desc_lim;
2143 dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
2144 ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
2145 ETH_LINK_SPEED_1G;
2146 }
2148 static const uint32_t *
2149 eth_igb_supported_ptypes_get(struct rte_eth_dev *dev)
2151 static const uint32_t ptypes[] = {
2152 /* refers to igb_rxd_pkt_info_to_pkt_type() */
2153 RTE_PTYPE_L2_ETHER,
2154 RTE_PTYPE_L3_IPV4,
2155 RTE_PTYPE_L3_IPV4_EXT,
2156 RTE_PTYPE_L3_IPV6,
2157 RTE_PTYPE_L3_IPV6_EXT,
2158 RTE_PTYPE_L4_TCP,
2159 RTE_PTYPE_L4_UDP,
2160 RTE_PTYPE_L4_SCTP,
2161 RTE_PTYPE_TUNNEL_IP,
2162 RTE_PTYPE_INNER_L3_IPV6,
2163 RTE_PTYPE_INNER_L3_IPV6_EXT,
2164 RTE_PTYPE_INNER_L4_TCP,
2165 RTE_PTYPE_INNER_L4_UDP,
2166 RTE_PTYPE_UNKNOWN
2167 };
2169 if (dev->rx_pkt_burst == eth_igb_recv_pkts ||
2170 dev->rx_pkt_burst == eth_igb_recv_scattered_pkts)
2171 return ptypes;
2172 return NULL;
2173 }
2176 eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2178 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2180 dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
2181 dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
2182 dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
2183 dev_info->max_mac_addrs = hw->mac.rar_entry_count;
2184 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
2185 DEV_RX_OFFLOAD_IPV4_CKSUM |
2186 DEV_RX_OFFLOAD_UDP_CKSUM |
2187 DEV_RX_OFFLOAD_TCP_CKSUM;
2188 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
2189 DEV_TX_OFFLOAD_IPV4_CKSUM |
2190 DEV_TX_OFFLOAD_UDP_CKSUM |
2191 DEV_TX_OFFLOAD_TCP_CKSUM |
2192 DEV_TX_OFFLOAD_SCTP_CKSUM |
2193 DEV_TX_OFFLOAD_TCP_TSO;
2194 switch (hw->mac.type) {
2195 case e1000_vfadapt:
2196 dev_info->max_rx_queues = 2;
2197 dev_info->max_tx_queues = 2;
2198 break;
2199 case e1000_vfadapt_i350:
2200 dev_info->max_rx_queues = 1;
2201 dev_info->max_tx_queues = 1;
2202 break;
2203 default:
2204 /* Should not happen */
2205 break;
2206 }
2208 dev_info->default_rxconf = (struct rte_eth_rxconf) {
2209 .rx_thresh = {
2210 .pthresh = IGB_DEFAULT_RX_PTHRESH,
2211 .hthresh = IGB_DEFAULT_RX_HTHRESH,
2212 .wthresh = IGB_DEFAULT_RX_WTHRESH,
2213 },
2214 .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
2215 .rx_drop_en = 0,
2216 };
2218 dev_info->default_txconf = (struct rte_eth_txconf) {
2219 .tx_thresh = {
2220 .pthresh = IGB_DEFAULT_TX_PTHRESH,
2221 .hthresh = IGB_DEFAULT_TX_HTHRESH,
2222 .wthresh = IGB_DEFAULT_TX_WTHRESH,
2223 },
2224 .txq_flags = 0,
2225 };
2227 dev_info->rx_desc_lim = rx_desc_lim;
2228 dev_info->tx_desc_lim = tx_desc_lim;
2231 /* return 0 means link status changed, -1 means not changed */
2233 eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2235 struct e1000_hw *hw =
2236 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2237 struct rte_eth_link link, old;
2238 int link_check, count;
2241 hw->mac.get_link_status = 1;
2243 /* possible wait-to-complete in up to 9 seconds */
2244 for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count++) {
2245 /* Read the real link status */
2246 switch (hw->phy.media_type) {
2247 case e1000_media_type_copper:
2248 /* Do the work to read phy */
2249 e1000_check_for_link(hw);
2250 link_check = !hw->mac.get_link_status;
2251 break;
2253 case e1000_media_type_fiber:
2254 e1000_check_for_link(hw);
2255 link_check = (E1000_READ_REG(hw, E1000_STATUS) &
2256 E1000_STATUS_LU);
2257 break;
2259 case e1000_media_type_internal_serdes:
2260 e1000_check_for_link(hw);
2261 link_check = hw->mac.serdes_has_link;
2262 break;
2264 /* VF device is type_unknown */
2265 case e1000_media_type_unknown:
2266 eth_igbvf_link_update(hw);
2267 link_check = !hw->mac.get_link_status;
2268 break;
2269 default:
2270 break;
2271 }
2273 if (link_check || wait_to_complete == 0)
2274 break;
2275 rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL);
2277 memset(&link, 0, sizeof(link));
2278 rte_igb_dev_atomic_read_link_status(dev, &link);
2279 old = link;
2281 /* Now we check if a transition has happened */
2282 if (link_check) {
2283 uint16_t duplex, speed;
2284 hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
2285 link.link_duplex = (duplex == FULL_DUPLEX) ?
2286 ETH_LINK_FULL_DUPLEX :
2287 ETH_LINK_HALF_DUPLEX;
2288 link.link_speed = speed;
2289 link.link_status = ETH_LINK_UP;
2290 link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2291 ETH_LINK_SPEED_FIXED);
2292 } else if (!link_check) {
2293 link.link_speed = 0;
2294 link.link_duplex = ETH_LINK_HALF_DUPLEX;
2295 link.link_status = ETH_LINK_DOWN;
2296 link.link_autoneg = ETH_LINK_SPEED_FIXED;
2297 }
2298 rte_igb_dev_atomic_write_link_status(dev, &link);
2301 if (old.link_status == link.link_status)
2302 return -1;
2304 return 0;
2305 }
2309 * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
2310 * For ASF and Pass Through versions of f/w this means
2311 * that the driver is loaded.
2314 igb_hw_control_acquire(struct e1000_hw *hw)
2318 /* Let firmware know the driver has taken over */
2319 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2320 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
2324 * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
2325 * For ASF and Pass Through versions of f/w this means that the
2326 * driver is no longer loaded.
2329 igb_hw_control_release(struct e1000_hw *hw)
2333 /* Let firmware take over control of h/w */
2334 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2335 E1000_WRITE_REG(hw, E1000_CTRL_EXT,
2336 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
2340 * Bit of a misnomer, what this really means is
2341 * to enable OS management of the system... aka
2342 * to disable special hardware management features.
2345 igb_init_manageability(struct e1000_hw *hw)
2347 if (e1000_enable_mng_pass_thru(hw)) {
2348 uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
2349 uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
2351 /* disable hardware interception of ARP */
2352 manc &= ~(E1000_MANC_ARP_EN);
2354 /* enable receiving management packets to the host */
2355 manc |= E1000_MANC_EN_MNG2HOST;
2356 manc2h |= 1 << 5; /* Mng Port 623 */
2357 manc2h |= 1 << 6; /* Mng Port 664 */
2358 E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
2359 E1000_WRITE_REG(hw, E1000_MANC, manc);
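/*
 * Ports 623 and 664 are the RMCP and secure-RMCP manageability
 * ports; forwarding them to the host keeps management traffic
 * visible while hardware ARP interception stays disabled.
 */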
2364 igb_release_manageability(struct e1000_hw *hw)
2366 if (e1000_enable_mng_pass_thru(hw)) {
2367 uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
2369 manc |= E1000_MANC_ARP_EN;
2370 manc &= ~E1000_MANC_EN_MNG2HOST;
2372 E1000_WRITE_REG(hw, E1000_MANC, manc);
2377 eth_igb_promiscuous_enable(struct rte_eth_dev *dev)
2379 struct e1000_hw *hw =
2380 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2383 rctl = E1000_READ_REG(hw, E1000_RCTL);
2384 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2385 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2389 eth_igb_promiscuous_disable(struct rte_eth_dev *dev)
2391 struct e1000_hw *hw =
2392 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2395 rctl = E1000_READ_REG(hw, E1000_RCTL);
2396 rctl &= (~E1000_RCTL_UPE);
2397 if (dev->data->all_multicast == 1)
2398 rctl |= E1000_RCTL_MPE;
2399 else
2400 rctl &= (~E1000_RCTL_MPE);
2401 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2405 eth_igb_allmulticast_enable(struct rte_eth_dev *dev)
2407 struct e1000_hw *hw =
2408 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2411 rctl = E1000_READ_REG(hw, E1000_RCTL);
2412 rctl |= E1000_RCTL_MPE;
2413 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2417 eth_igb_allmulticast_disable(struct rte_eth_dev *dev)
2419 struct e1000_hw *hw =
2420 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2423 if (dev->data->promiscuous == 1)
2424 return; /* must remain in all_multicast mode */
2425 rctl = E1000_READ_REG(hw, E1000_RCTL);
2426 rctl &= (~E1000_RCTL_MPE);
2427 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2431 eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2433 struct e1000_hw *hw =
2434 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2435 struct e1000_vfta * shadow_vfta =
2436 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
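/*
 * The VFTA is an array of 32-bit registers holding one bit per VLAN
 * ID: the upper bits of the VLAN ID select the register (vid_idx)
 * and the low five bits select the bit within it (vid_bit).
 */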
2437 uint32_t vfta;
2438 uint32_t vid_idx;
2439 uint32_t vid_bit;
2441 vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
2442 E1000_VFTA_ENTRY_MASK);
2443 vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
2444 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
2445 if (on)
2446 vfta |= vid_bit;
2447 else
2448 vfta &= ~vid_bit;
2449 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);
2451 /* update local VFTA copy */
2452 shadow_vfta->vfta[vid_idx] = vfta;
2454 return 0;
2455 }
2458 eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
2459 enum rte_vlan_type vlan_type,
2462 struct e1000_hw *hw =
2463 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2466 qinq = E1000_READ_REG(hw, E1000_CTRL_EXT);
2467 qinq &= E1000_CTRL_EXT_EXT_VLAN;
2469 /* only the outer TPID of a double (QinQ) VLAN can be configured */
2470 if (qinq && vlan_type == ETH_VLAN_TYPE_OUTER) {
2471 reg = E1000_READ_REG(hw, E1000_VET);
2472 reg = (reg & (~E1000_VET_VET_EXT)) |
2473 ((uint32_t)tpid << E1000_VET_VET_EXT_SHIFT);
2474 E1000_WRITE_REG(hw, E1000_VET, reg);
2476 return 0;
2477 }
2479 /* all other TPID values are read-only */
2480 PMD_DRV_LOG(ERR, "Not supported");
2482 return -ENOTSUP;
2483 }
2486 igb_vlan_hw_filter_disable(struct rte_eth_dev *dev)
2488 struct e1000_hw *hw =
2489 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2492 /* Filter Table Disable */
2493 reg = E1000_READ_REG(hw, E1000_RCTL);
2494 reg &= ~E1000_RCTL_CFIEN;
2495 reg &= ~E1000_RCTL_VFE;
2496 E1000_WRITE_REG(hw, E1000_RCTL, reg);
2500 igb_vlan_hw_filter_enable(struct rte_eth_dev *dev)
2502 struct e1000_hw *hw =
2503 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2504 struct e1000_vfta * shadow_vfta =
2505 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2509 /* Filter Table Enable, CFI not used for packet acceptance */
2510 reg = E1000_READ_REG(hw, E1000_RCTL);
2511 reg &= ~E1000_RCTL_CFIEN;
2512 reg |= E1000_RCTL_VFE;
2513 E1000_WRITE_REG(hw, E1000_RCTL, reg);
2515 /* restore VFTA table */
2516 for (i = 0; i < IGB_VFTA_SIZE; i++)
2517 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
2521 igb_vlan_hw_strip_disable(struct rte_eth_dev *dev)
2523 struct e1000_hw *hw =
2524 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2527 /* VLAN Mode Disable */
2528 reg = E1000_READ_REG(hw, E1000_CTRL);
2529 reg &= ~E1000_CTRL_VME;
2530 E1000_WRITE_REG(hw, E1000_CTRL, reg);
2534 igb_vlan_hw_strip_enable(struct rte_eth_dev *dev)
2536 struct e1000_hw *hw =
2537 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2540 /* VLAN Mode Enable */
2541 reg = E1000_READ_REG(hw, E1000_CTRL);
2542 reg |= E1000_CTRL_VME;
2543 E1000_WRITE_REG(hw, E1000_CTRL, reg);
2547 igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
2549 struct e1000_hw *hw =
2550 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2553 /* CTRL_EXT: Extended VLAN */
2554 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
2555 reg &= ~E1000_CTRL_EXT_EXTEND_VLAN;
2556 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
2558 /* Update maximum packet length */
2559 if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
2560 E1000_WRITE_REG(hw, E1000_RLPML,
2561 dev->data->dev_conf.rxmode.max_rx_pkt_len +
2562 VLAN_TAG_SIZE);
2566 igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
2568 struct e1000_hw *hw =
2569 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2572 /* CTRL_EXT: Extended VLAN */
2573 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
2574 reg |= E1000_CTRL_EXT_EXTEND_VLAN;
2575 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
2577 /* Update maximum packet length */
2578 if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
2579 E1000_WRITE_REG(hw, E1000_RLPML,
2580 dev->data->dev_conf.rxmode.max_rx_pkt_len +
2581 2 * VLAN_TAG_SIZE);
2585 eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2587 if (mask & ETH_VLAN_STRIP_MASK) {
2588 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
2589 igb_vlan_hw_strip_enable(dev);
2591 igb_vlan_hw_strip_disable(dev);
2594 if (mask & ETH_VLAN_FILTER_MASK) {
2595 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
2596 igb_vlan_hw_filter_enable(dev);
2598 igb_vlan_hw_filter_disable(dev);
2601 if (mask & ETH_VLAN_EXTEND_MASK) {
2602 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
2603 igb_vlan_hw_extend_enable(dev);
2605 igb_vlan_hw_extend_disable(dev);
2611 * It enables the interrupt mask and then enables the interrupt.
2614 * Pointer to struct rte_eth_dev.
2617 * - On success, zero.
2618 * - On failure, a negative value.
2621 eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
2623 struct e1000_interrupt *intr =
2624 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2626 intr->mask |= E1000_ICR_LSC;
2628 return 0;
2629 }
2631 /* It clears the interrupt causes and enables the interrupt.
2632 * It will be called only once, during NIC initialization.
2635 * Pointer to struct rte_eth_dev.
2638 * - On success, zero.
2639 * - On failure, a negative value.
2641 static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev)
2643 uint32_t mask, regval;
2644 struct e1000_hw *hw =
2645 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2646 struct rte_eth_dev_info dev_info;
2648 memset(&dev_info, 0, sizeof(dev_info));
2649 eth_igb_infos_get(dev, &dev_info);
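/*
 * Build a mask with one bit per supported Rx queue and OR it into
 * EIMS so that every per-queue interrupt vector is unmasked.
 */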
2651 mask = 0xFFFFFFFF >> (32 - dev_info.max_rx_queues);
2652 regval = E1000_READ_REG(hw, E1000_EIMS);
2653 E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
2659 * It reads ICR and gets interrupt causes, check it and set a bit flag
2660 * to update link status.
2663 * Pointer to struct rte_eth_dev.
2666 * - On success, zero.
2667 * - On failure, a negative value.
2670 eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
2673 struct e1000_hw *hw =
2674 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2675 struct e1000_interrupt *intr =
2676 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2678 igb_intr_disable(hw);
2680 /* read-on-clear nic registers here */
2681 icr = E1000_READ_REG(hw, E1000_ICR);
2684 if (icr & E1000_ICR_LSC) {
2685 intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
2686 }
2688 if (icr & E1000_ICR_VMMB)
2689 intr->flags |= E1000_FLAG_MAILBOX;
2691 return 0;
2692 }
2695 * It executes link_update after knowing an interrupt is present.
2698 * Pointer to struct rte_eth_dev.
2701 * - On success, zero.
2702 * - On failure, a negative value.
2705 eth_igb_interrupt_action(struct rte_eth_dev *dev,
2706 struct rte_intr_handle *intr_handle)
2708 struct e1000_hw *hw =
2709 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2710 struct e1000_interrupt *intr =
2711 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2712 struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
2713 uint32_t tctl, rctl;
2714 struct rte_eth_link link;
2717 if (intr->flags & E1000_FLAG_MAILBOX) {
2718 igb_pf_mbx_process(dev);
2719 intr->flags &= ~E1000_FLAG_MAILBOX;
2720 }
2722 igb_intr_enable(dev);
2723 rte_intr_enable(intr_handle);
2725 if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
2726 intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
2728 /* set get_link_status to check register later */
2729 hw->mac.get_link_status = 1;
2730 ret = eth_igb_link_update(dev, 0);
2732 /* check if link has changed */
2733 if (ret < 0)
2734 return 0;
2736 memset(&link, 0, sizeof(link));
2737 rte_igb_dev_atomic_read_link_status(dev, &link);
2738 if (link.link_status) {
2740 " Port %d: Link Up - speed %u Mbps - %s",
2742 (unsigned)link.link_speed,
2743 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
2744 "full-duplex" : "half-duplex");
2746 PMD_INIT_LOG(INFO, " Port %d: Link Down",
2747 dev->data->port_id);
2750 PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
2751 pci_dev->addr.domain,
2752 pci_dev->addr.bus,
2753 pci_dev->addr.devid,
2754 pci_dev->addr.function);
2755 tctl = E1000_READ_REG(hw, E1000_TCTL);
2756 rctl = E1000_READ_REG(hw, E1000_RCTL);
2757 if (link.link_status) {
2758 /* enable Tx/Rx */
2759 tctl |= E1000_TCTL_EN;
2760 rctl |= E1000_RCTL_EN;
2761 } else {
2762 /* disable Tx/Rx */
2763 tctl &= ~E1000_TCTL_EN;
2764 rctl &= ~E1000_RCTL_EN;
2765 }
2766 E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2767 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2768 E1000_WRITE_FLUSH(hw);
2769 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
2770 }
2772 return 0;
2773 }
2776 * Interrupt handler which shall be registered at first.
2779 * Pointer to interrupt handle.
2781 * The address of parameter (struct rte_eth_dev *) registered before.
2787 eth_igb_interrupt_handler(struct rte_intr_handle *handle, void *param)
2789 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2791 eth_igb_interrupt_get_status(dev);
2792 eth_igb_interrupt_action(dev, handle);
2796 eth_igbvf_interrupt_get_status(struct rte_eth_dev *dev)
2799 struct e1000_hw *hw =
2800 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2801 struct e1000_interrupt *intr =
2802 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2804 igbvf_intr_disable(hw);
2806 /* read-on-clear nic registers here */
2807 eicr = E1000_READ_REG(hw, E1000_EICR);
2810 if (eicr == E1000_VTIVAR_MISC_MAILBOX)
2811 intr->flags |= E1000_FLAG_MAILBOX;
2816 void igbvf_mbx_process(struct rte_eth_dev *dev)
2818 struct e1000_hw *hw =
2819 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2820 struct e1000_mbx_info *mbx = &hw->mbx;
2823 if (mbx->ops.read(hw, &in_msg, 1, 0))
2824 return;
2826 /* PF reset VF event */
2827 if (in_msg == E1000_PF_CONTROL_MSG)
2828 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL);
2832 eth_igbvf_interrupt_action(struct rte_eth_dev *dev, struct rte_intr_handle *intr_handle)
2834 struct e1000_interrupt *intr =
2835 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2837 if (intr->flags & E1000_FLAG_MAILBOX) {
2838 igbvf_mbx_process(dev);
2839 intr->flags &= ~E1000_FLAG_MAILBOX;
2840 }
2842 igbvf_intr_enable(dev);
2843 rte_intr_enable(intr_handle);
2845 return 0;
2846 }
2849 eth_igbvf_interrupt_handler(struct rte_intr_handle *handle,
2852 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2854 eth_igbvf_interrupt_get_status(dev);
2855 eth_igbvf_interrupt_action(dev, handle);
2859 eth_igb_led_on(struct rte_eth_dev *dev)
2861 struct e1000_hw *hw;
2863 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2864 return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
2868 eth_igb_led_off(struct rte_eth_dev *dev)
2870 struct e1000_hw *hw;
2872 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2873 return e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
2877 eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2879 struct e1000_hw *hw;
2880 uint32_t ctrl;
2881 int tx_pause;
2882 int rx_pause;
2884 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2885 fc_conf->pause_time = hw->fc.pause_time;
2886 fc_conf->high_water = hw->fc.high_water;
2887 fc_conf->low_water = hw->fc.low_water;
2888 fc_conf->send_xon = hw->fc.send_xon;
2889 fc_conf->autoneg = hw->mac.autoneg;
2892 * Return rx_pause and tx_pause status according to actual setting of
2893 * the TFCE and RFCE bits in the CTRL register.
2895 ctrl = E1000_READ_REG(hw, E1000_CTRL);
2896 if (ctrl & E1000_CTRL_TFCE)
2897 tx_pause = 1;
2898 else
2899 tx_pause = 0;
2901 if (ctrl & E1000_CTRL_RFCE)
2902 rx_pause = 1;
2903 else
2904 rx_pause = 0;
2906 if (rx_pause && tx_pause)
2907 fc_conf->mode = RTE_FC_FULL;
2908 else if (rx_pause)
2909 fc_conf->mode = RTE_FC_RX_PAUSE;
2910 else if (tx_pause)
2911 fc_conf->mode = RTE_FC_TX_PAUSE;
2912 else
2913 fc_conf->mode = RTE_FC_NONE;
2915 return 0;
2916 }
2919 eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
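/*
 * The translation table below is indexed by enum rte_eth_fc_mode
 * (RTE_FC_NONE..RTE_FC_FULL), mapping the generic ethdev flow
 * control mode onto the matching e1000 mode.
 */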
2921 struct e1000_hw *hw;
2922 int err;
2923 enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
2924 e1000_fc_none,
2925 e1000_fc_rx_pause,
2926 e1000_fc_tx_pause,
2927 e1000_fc_full
2928 };
2929 uint32_t rx_buf_size;
2930 uint32_t max_high_water;
2931 uint32_t rctl;
2933 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2934 if (fc_conf->autoneg != hw->mac.autoneg)
2935 return -ENOTTY;
2936 rx_buf_size = igb_get_rx_buffer_size(hw);
2937 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2939 /* At least reserve one Ethernet frame for watermark */
2940 max_high_water = rx_buf_size - ETHER_MAX_LEN;
2941 if ((fc_conf->high_water > max_high_water) ||
2942 (fc_conf->high_water < fc_conf->low_water)) {
2943 PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
2944 PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water);
2948 hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
2949 hw->fc.pause_time = fc_conf->pause_time;
2950 hw->fc.high_water = fc_conf->high_water;
2951 hw->fc.low_water = fc_conf->low_water;
2952 hw->fc.send_xon = fc_conf->send_xon;
2954 err = e1000_setup_link_generic(hw);
2955 if (err == E1000_SUCCESS) {
2957 /* check if we want to forward MAC frames - driver doesn't have native
2958 * capability to do that, so we'll write the registers ourselves */
2960 rctl = E1000_READ_REG(hw, E1000_RCTL);
2962 /* set or clear MFLCN.PMCF bit depending on configuration */
2963 if (fc_conf->mac_ctrl_frame_fwd != 0)
2964 rctl |= E1000_RCTL_PMCF;
2966 rctl &= ~E1000_RCTL_PMCF;
2968 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2969 E1000_WRITE_FLUSH(hw);
2971 return 0;
2972 }
2974 PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
2978 #define E1000_RAH_POOLSEL_SHIFT (18)
2980 eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
2981 uint32_t index, uint32_t pool)
2983 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2986 e1000_rar_set(hw, mac_addr->addr_bytes, index);
2987 rah = E1000_READ_REG(hw, E1000_RAH(index));
2988 rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool));
2989 E1000_WRITE_REG(hw, E1000_RAH(index), rah);
2993 eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
2995 uint8_t addr[ETHER_ADDR_LEN];
2996 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2998 memset(addr, 0, sizeof(addr));
3000 e1000_rar_set(hw, addr, index);
3004 eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
3005 struct ether_addr *addr)
3007 eth_igb_rar_clear(dev, 0);
3009 eth_igb_rar_set(dev, (void *)addr, 0, 0);
3012 * Virtual Function operations
3015 igbvf_intr_disable(struct e1000_hw *hw)
3017 PMD_INIT_FUNC_TRACE();
3019 /* Clear interrupt mask to stop from interrupts being generated */
3020 E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF);
3022 E1000_WRITE_FLUSH(hw);
3026 igbvf_stop_adapter(struct rte_eth_dev *dev)
3030 struct rte_eth_dev_info dev_info;
3031 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3033 memset(&dev_info, 0, sizeof(dev_info));
3034 eth_igbvf_infos_get(dev, &dev_info);
3036 /* Clear interrupt mask to stop from interrupts being generated */
3037 igbvf_intr_disable(hw);
3039 /* Clear any pending interrupts, flush previous writes */
3040 E1000_READ_REG(hw, E1000_EICR);
3042 /* Disable the transmit unit. Each queue must be disabled. */
3043 for (i = 0; i < dev_info.max_tx_queues; i++)
3044 E1000_WRITE_REG(hw, E1000_TXDCTL(i), E1000_TXDCTL_SWFLSH);
3046 /* Disable the receive unit by stopping each queue */
3047 for (i = 0; i < dev_info.max_rx_queues; i++) {
3048 reg_val = E1000_READ_REG(hw, E1000_RXDCTL(i));
3049 reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE;
3050 E1000_WRITE_REG(hw, E1000_RXDCTL(i), reg_val);
3051 while (E1000_READ_REG(hw, E1000_RXDCTL(i)) & E1000_RXDCTL_QUEUE_ENABLE)
3052 ;
3053 }
3055 /* flush all queue disables */
3056 E1000_WRITE_FLUSH(hw);
3060 static int eth_igbvf_link_update(struct e1000_hw *hw)
3062 struct e1000_mbx_info *mbx = &hw->mbx;
3063 struct e1000_mac_info *mac = &hw->mac;
3064 int ret_val = E1000_SUCCESS;
3066 PMD_INIT_LOG(DEBUG, "e1000_check_for_link_vf");
3069 * We only want to run this if there has been a rst asserted.
3070 * In this case that could mean a link change, device reset,
3071 * or a virtual function reset
3074 /* If we were hit with a reset or timeout drop the link */
3075 if (!e1000_check_for_rst(hw, 0) || !mbx->timeout)
3076 mac->get_link_status = TRUE;
3078 if (!mac->get_link_status)
3079 goto out;
3081 /* if link status is down no point in checking to see if pf is up */
3082 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
3083 goto out;
3085 /* if we passed all the tests above then the link is up and we no
3086 * longer need to check for link */
3087 mac->get_link_status = FALSE;
3089 out:
3090 return ret_val;
3091 }
3095 igbvf_dev_configure(struct rte_eth_dev *dev)
3097 struct rte_eth_conf *conf = &dev->data->dev_conf;
3099 PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
3100 dev->data->port_id);
3103 * VF has no ability to enable/disable HW CRC
3104 * Keep the persistent behavior the same as Host PF
3106 #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
3107 if (!conf->rxmode.hw_strip_crc) {
3108 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
3109 conf->rxmode.hw_strip_crc = 1;
3110 }
3111 #else
3112 if (conf->rxmode.hw_strip_crc) {
3113 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
3114 conf->rxmode.hw_strip_crc = 0;
3115 }
3116 #endif
3118 return 0;
3119 }
3122 igbvf_dev_start(struct rte_eth_dev *dev)
3124 struct e1000_hw *hw =
3125 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3126 struct e1000_adapter *adapter =
3127 E1000_DEV_PRIVATE(dev->data->dev_private);
3128 struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
3129 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3131 uint32_t intr_vector = 0;
3133 PMD_INIT_FUNC_TRACE();
3135 hw->mac.ops.reset_hw(hw);
3136 adapter->stopped = 0;
3139 igbvf_set_vfta_all(dev, 1);
3141 eth_igbvf_tx_init(dev);
3143 /* This can fail when allocating mbufs for descriptor rings */
3144 ret = eth_igbvf_rx_init(dev);
3146 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
3147 igb_dev_clear_queues(dev);
3151 /* check and configure queue intr-vector mapping */
3152 if (dev->data->dev_conf.intr_conf.rxq != 0) {
3153 intr_vector = dev->data->nb_rx_queues;
3154 ret = rte_intr_efd_enable(intr_handle, intr_vector);
3155 if (ret)
3156 return ret;
3157 }
3159 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
3160 intr_handle->intr_vec =
3161 rte_zmalloc("intr_vec",
3162 dev->data->nb_rx_queues * sizeof(int), 0);
3163 if (!intr_handle->intr_vec) {
3164 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
3165 " intr_vec", dev->data->nb_rx_queues);
3170 eth_igbvf_configure_msix_intr(dev);
3172 /* enable uio/vfio intr/eventfd mapping */
3173 rte_intr_enable(intr_handle);
3175 /* resume enabled intr since hw reset */
3176 igbvf_intr_enable(dev);
3178 return 0;
3179 }
3182 igbvf_dev_stop(struct rte_eth_dev *dev)
3184 struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
3185 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3187 PMD_INIT_FUNC_TRACE();
3189 igbvf_stop_adapter(dev);
3192 * Clear what we set, but we still keep shadow_vfta to
3193 * restore after device starts
3195 igbvf_set_vfta_all(dev, 0);
3197 igb_dev_clear_queues(dev);
3199 /* disable intr eventfd mapping */
3200 rte_intr_disable(intr_handle);
3202 /* Clean datapath event and queue/vec mapping */
3203 rte_intr_efd_disable(intr_handle);
3204 if (intr_handle->intr_vec) {
3205 rte_free(intr_handle->intr_vec);
3206 intr_handle->intr_vec = NULL;
3211 igbvf_dev_close(struct rte_eth_dev *dev)
3213 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3214 struct e1000_adapter *adapter =
3215 E1000_DEV_PRIVATE(dev->data->dev_private);
3216 struct ether_addr addr;
3218 PMD_INIT_FUNC_TRACE();
3222 igbvf_dev_stop(dev);
3223 adapter->stopped = 1;
3224 igb_dev_free_queues(dev);
3227 * reprogram the RAR with a zero mac address,
3228 * to ensure that the VF traffic goes to the PF
3229 * after stop, close and detach of the VF.
3232 memset(&addr, 0, sizeof(addr));
3233 igbvf_default_mac_addr_set(dev, &addr);
3237 igbvf_promiscuous_enable(struct rte_eth_dev *dev)
3239 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3241 /* Set both unicast and multicast promisc */
3242 e1000_promisc_set_vf(hw, e1000_promisc_enabled);
3246 igbvf_promiscuous_disable(struct rte_eth_dev *dev)
3248 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3250 /* If in allmulticast mode leave multicast promisc */
3251 if (dev->data->all_multicast == 1)
3252 e1000_promisc_set_vf(hw, e1000_promisc_multicast);
3254 e1000_promisc_set_vf(hw, e1000_promisc_disabled);
3258 igbvf_allmulticast_enable(struct rte_eth_dev *dev)
3260 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3262 /* In promiscuous mode multicast promisc already set */
3263 if (dev->data->promiscuous == 0)
3264 e1000_promisc_set_vf(hw, e1000_promisc_multicast);
3268 igbvf_allmulticast_disable(struct rte_eth_dev *dev)
3270 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3272 /* In promiscuous mode leave multicast promisc enabled */
3273 if (dev->data->promiscuous == 0)
3274 e1000_promisc_set_vf(hw, e1000_promisc_disabled);
3277 static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on)
3279 struct e1000_mbx_info *mbx = &hw->mbx;
3283 /* After setting a VLAN, VLAN stripping is also enabled by the igb driver */
3284 msgbuf[0] = E1000_VF_SET_VLAN;
3285 msgbuf[1] = vid;
3286 /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
3287 if (on)
3288 msgbuf[0] |= E1000_VF_SET_VLAN_ADD;
3290 err = mbx->ops.write_posted(hw, msgbuf, 2, 0);
3291 if (err)
3292 goto mbx_err;
3294 err = mbx->ops.read_posted(hw, msgbuf, 2, 0);
3295 if (err)
3296 goto mbx_err;
3298 msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
3299 if (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK))
3300 err = -EINVAL;
3302 mbx_err:
3303 return err;
3304 }
3306 static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on)
3308 struct e1000_hw *hw =
3309 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3310 struct e1000_vfta * shadow_vfta =
3311 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
3312 int i = 0, j = 0, vfta = 0, mask = 1;
3314 for (i = 0; i < IGB_VFTA_SIZE; i++) {
3315 vfta = shadow_vfta->vfta[i];
3316 if (vfta) {
3317 mask = 1;
3318 for (j = 0; j < 32; j++) {
3319 if (vfta & mask)
3320 igbvf_set_vfta(hw,
3321 (uint16_t)((i << 5) + j), on);
3322 mask <<= 1;
3323 }
3324 }
3325 }
3326 }
3330 igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3332 struct e1000_hw *hw =
3333 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3334 struct e1000_vfta * shadow_vfta =
3335 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
3336 uint32_t vid_idx = 0;
3337 uint32_t vid_bit = 0;
3340 PMD_INIT_FUNC_TRACE();
3342 /* vind is not used in the VF driver; set to 0, check ixgbe_set_vfta_vf */
3343 ret = igbvf_set_vfta(hw, vlan_id, !!on);
3345 PMD_INIT_LOG(ERR, "Unable to set VF vlan");
3348 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
3349 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
3351 /* Save what we set and restore it after device reset */
3352 if (on)
3353 shadow_vfta->vfta[vid_idx] |= vid_bit;
3354 else
3355 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
3357 return 0;
3358 }
3361 igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr)
3363 struct e1000_hw *hw =
3364 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3366 /* index is not used by rar_set() */
3367 hw->mac.ops.rar_set(hw, (void *)addr, 0);
3372 eth_igb_rss_reta_update(struct rte_eth_dev *dev,
3373 struct rte_eth_rss_reta_entry64 *reta_conf,
3378 uint16_t idx, shift;
3379 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3381 if (reta_size != ETH_RSS_RETA_SIZE_128) {
3382 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3383 "(%d) doesn't match the number hardware can supported "
3384 "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
3388 for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
3389 idx = i / RTE_RETA_GROUP_SIZE;
3390 shift = i % RTE_RETA_GROUP_SIZE;
3391 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
3392 IGB_4_BIT_MASK);
3393 if (!mask)
3394 continue;
3395 if (mask == IGB_4_BIT_MASK)
3396 r = 0;
3397 else
3398 r = E1000_READ_REG(hw, E1000_RETA(i >> 2));
3399 for (j = 0, reta = 0; j < IGB_4_BIT_WIDTH; j++) {
3400 if (mask & (0x1 << j))
3401 reta |= reta_conf[idx].reta[shift + j] <<
3402 (CHAR_BIT * j);
3403 else
3404 reta |= r & (IGB_8_BIT_MASK << (CHAR_BIT * j));
3405 }
3406 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta);
3407 }
3409 return 0;
3410 }
3413 eth_igb_rss_reta_query(struct rte_eth_dev *dev,
3414 struct rte_eth_rss_reta_entry64 *reta_conf,
3419 uint16_t idx, shift;
3420 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3422 if (reta_size != ETH_RSS_RETA_SIZE_128) {
3423 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3424 "(%d) doesn't match the number hardware can supported "
3425 "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
3429 for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
3430 idx = i / RTE_RETA_GROUP_SIZE;
3431 shift = i % RTE_RETA_GROUP_SIZE;
3432 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
3433 IGB_4_BIT_MASK);
3434 if (!mask)
3435 continue;
3436 reta = E1000_READ_REG(hw, E1000_RETA(i >> 2));
3437 for (j = 0; j < IGB_4_BIT_WIDTH; j++) {
3438 if (mask & (0x1 << j))
3439 reta_conf[idx].reta[shift + j] =
3440 ((reta >> (CHAR_BIT * j)) &
3441 IGB_8_BIT_MASK);
3442 }
3443 }
3445 return 0;
3446 }
3448 #define MAC_TYPE_FILTER_SUP(type) do {\
3449 if ((type) != e1000_82580 && (type) != e1000_i350 &&\
3450 (type) != e1000_82576)\
3451 return -ENOTSUP;\
3452 } while (0)
3455 eth_igb_syn_filter_set(struct rte_eth_dev *dev,
3456 struct rte_eth_syn_filter *filter,
3459 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3460 uint32_t synqf, rfctl;
3462 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
3463 return -EINVAL;
3465 synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
3467 if (add) {
3468 if (synqf & E1000_SYN_FILTER_ENABLE)
3469 return -EINVAL;
3471 synqf = (uint32_t)(((filter->queue << E1000_SYN_FILTER_QUEUE_SHIFT) &
3472 E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE);
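/*
 * The SYN filter steers TCP SYN packets to a dedicated Rx queue:
 * SYNQF(0) holds the enable bit and target queue, while
 * RFCTL.SYNQFP raises the SYN filter's priority over other filters
 * when hig_pri is set.
 */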
3474 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
3475 if (filter->hig_pri)
3476 rfctl |= E1000_RFCTL_SYNQFP;
3478 rfctl &= ~E1000_RFCTL_SYNQFP;
3480 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
3481 } else {
3482 if (!(synqf & E1000_SYN_FILTER_ENABLE))
3483 return -ENOENT;
3484 synqf = 0;
3485 }
3487 E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
3488 E1000_WRITE_FLUSH(hw);
3490 return 0;
3491 }
3493 eth_igb_syn_filter_get(struct rte_eth_dev *dev,
3494 struct rte_eth_syn_filter *filter)
3496 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3497 uint32_t synqf, rfctl;
3499 synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
3500 if (synqf & E1000_SYN_FILTER_ENABLE) {
3501 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
3502 filter->hig_pri = (rfctl & E1000_RFCTL_SYNQFP) ? 1 : 0;
3503 filter->queue = (uint8_t)((synqf & E1000_SYN_FILTER_QUEUE) >>
3504 E1000_SYN_FILTER_QUEUE_SHIFT);
3505 return 0;
3506 }
3508 return -ENOENT;
3509 }
3512 eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
3513 enum rte_filter_op filter_op,
3516 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3519 MAC_TYPE_FILTER_SUP(hw->mac.type);
3521 if (filter_op == RTE_ETH_FILTER_NOP)
3522 return 0;
3524 if (arg == NULL) {
3525 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
3526 filter_op);
3527 return -EINVAL;
3528 }
3530 switch (filter_op) {
3531 case RTE_ETH_FILTER_ADD:
3532 ret = eth_igb_syn_filter_set(dev,
3533 (struct rte_eth_syn_filter *)arg,
3534 TRUE);
3535 break;
3536 case RTE_ETH_FILTER_DELETE:
3537 ret = eth_igb_syn_filter_set(dev,
3538 (struct rte_eth_syn_filter *)arg,
3539 FALSE);
3540 break;
3541 case RTE_ETH_FILTER_GET:
3542 ret = eth_igb_syn_filter_get(dev,
3543 (struct rte_eth_syn_filter *)arg);
3544 break;
3545 default:
3546 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
3547 ret = -EINVAL;
3548 break;
3549 }
3551 return ret;
3552 }
3554 #define MAC_TYPE_FILTER_SUP_EXT(type) do {\
3555 if ((type) != e1000_82580 && (type) != e1000_i350)\
3556 return -ENOTSUP;\
3557 } while (0)
3559 /* translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info*/
3561 ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter,
3562 struct e1000_2tuple_filter_info *filter_info)
3564 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
3565 return -EINVAL;
3566 if (filter->priority > E1000_2TUPLE_MAX_PRI)
3567 return -EINVAL; /* priority is out of range. */
3568 if (filter->tcp_flags > TCP_FLAG_ALL)
3569 return -EINVAL; /* flags are invalid. */
3571 switch (filter->dst_port_mask) {
3572 case UINT16_MAX:
3573 filter_info->dst_port_mask = 0;
3574 filter_info->dst_port = filter->dst_port;
3575 break;
3576 case 0:
3577 filter_info->dst_port_mask = 1;
3578 break;
3579 default:
3580 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
3581 return -EINVAL;
3582 }
3584 switch (filter->proto_mask) {
3585 case UINT8_MAX:
3586 filter_info->proto_mask = 0;
3587 filter_info->proto = filter->proto;
3588 break;
3589 case 0:
3590 filter_info->proto_mask = 1;
3591 break;
3592 default:
3593 PMD_DRV_LOG(ERR, "invalid protocol mask.");
3594 return -EINVAL;
3595 }
3597 filter_info->priority = (uint8_t)filter->priority;
3598 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
3599 filter_info->tcp_flags = filter->tcp_flags;
3600 else
3601 filter_info->tcp_flags = 0;
3603 return 0;
3604 }
3606 static inline struct e1000_2tuple_filter *
3607 igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list,
3608 struct e1000_2tuple_filter_info *key)
3610 struct e1000_2tuple_filter *it;
3612 TAILQ_FOREACH(it, filter_list, entries) {
3613 if (memcmp(key, &it->filter_info,
3614 sizeof(struct e1000_2tuple_filter_info)) == 0) {
3615 return it;
3616 }
3617 }
3619 return NULL;
3620 }
3622 * igb_add_2tuple_filter - add a 2tuple filter
3625 * dev: Pointer to struct rte_eth_dev.
3626 * ntuple_filter: pointer to the filter that will be added.
3629 * - On success, zero.
3630 * - On failure, a negative value.
3633 igb_add_2tuple_filter(struct rte_eth_dev *dev,
3634 struct rte_eth_ntuple_filter *ntuple_filter)
3636 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3637 struct e1000_filter_info *filter_info =
3638 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3639 struct e1000_2tuple_filter *filter;
3640 uint32_t ttqf = E1000_TTQF_DISABLE_MASK;
3641 uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP;
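/*
 * A 2-tuple filter spans three registers: IMIR carries the
 * destination port and priority, TTQF the protocol and target
 * queue, and IMIREXT the TCP flag matching.
 */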
3644 filter = rte_zmalloc("e1000_2tuple_filter",
3645 sizeof(struct e1000_2tuple_filter), 0);
3646 if (filter == NULL)
3647 return -ENOMEM;
3649 ret = ntuple_filter_to_2tuple(ntuple_filter,
3650 &filter->filter_info);
3651 if (ret < 0) {
3652 rte_free(filter);
3653 return ret;
3654 }
3655 if (igb_2tuple_filter_lookup(&filter_info->twotuple_list,
3656 &filter->filter_info) != NULL) {
3657 PMD_DRV_LOG(ERR, "filter exists.");
3661 filter->queue = ntuple_filter->queue;
3664 * look for an unused 2tuple filter index,
3665 * and insert the filter to list.
3667 for (i = 0; i < E1000_MAX_TTQF_FILTERS; i++) {
3668 if (!(filter_info->twotuple_mask & (1 << i))) {
3669 filter_info->twotuple_mask |= 1 << i;
3670 filter->index = i;
3671 TAILQ_INSERT_TAIL(&filter_info->twotuple_list,
3672 filter, entries);
3673 break;
3674 }
3675 }
3677 if (i >= E1000_MAX_TTQF_FILTERS) {
3678 PMD_DRV_LOG(ERR, "2tuple filters are full.");
3683 imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
3684 if (filter->filter_info.dst_port_mask == 1) /* a mask of 1b means do not compare. */
3685 imir |= E1000_IMIR_PORT_BP;
3687 imir &= ~E1000_IMIR_PORT_BP;
3689 imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
3691 ttqf |= E1000_TTQF_QUEUE_ENABLE;
3692 ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT);
3693 ttqf |= (uint32_t)(filter->filter_info.proto & E1000_TTQF_PROTOCOL_MASK);
3694 if (filter->filter_info.proto_mask == 0)
3695 ttqf &= ~E1000_TTQF_MASK_ENABLE;
3697 /* tcp flags bits setting. */
3698 if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
3699 if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
3700 imir_ext |= E1000_IMIREXT_CTRL_URG;
3701 if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
3702 imir_ext |= E1000_IMIREXT_CTRL_ACK;
3703 if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
3704 imir_ext |= E1000_IMIREXT_CTRL_PSH;
3705 if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
3706 imir_ext |= E1000_IMIREXT_CTRL_RST;
3707 if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
3708 imir_ext |= E1000_IMIREXT_CTRL_SYN;
3709 if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
3710 imir_ext |= E1000_IMIREXT_CTRL_FIN;
3711 } else
3712 imir_ext |= E1000_IMIREXT_CTRL_BP;
3713 E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
3714 E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf);
3715 E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
3717 return 0;
3718 }
3720 * igb_remove_2tuple_filter - remove a 2tuple filter
3723 * dev: Pointer to struct rte_eth_dev.
3724 * ntuple_filter: pointer to the filter that will be removed.
3727 * - On success, zero.
3728 * - On failure, a negative value.
3731 igb_remove_2tuple_filter(struct rte_eth_dev *dev,
3732 struct rte_eth_ntuple_filter *ntuple_filter)
3734 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3735 struct e1000_filter_info *filter_info =
3736 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3737 struct e1000_2tuple_filter_info filter_2tuple;
3738 struct e1000_2tuple_filter *filter;
3741 memset(&filter_2tuple, 0, sizeof(struct e1000_2tuple_filter_info));
3742 ret = ntuple_filter_to_2tuple(ntuple_filter,
3743 &filter_2tuple);
3744 if (ret < 0)
3745 return ret;
3747 filter = igb_2tuple_filter_lookup(&filter_info->twotuple_list,
3748 &filter_2tuple);
3749 if (filter == NULL) {
3750 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3754 filter_info->twotuple_mask &= ~(1 << filter->index);
3755 TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries);
3756 rte_free(filter);
3758 E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK);
3759 E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
3760 E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
3762 return 0;
3763 }
3764 static inline struct e1000_flex_filter *
3765 eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list,
3766 struct e1000_flex_filter_info *key)
3768 struct e1000_flex_filter *it;
3770 TAILQ_FOREACH(it, filter_list, entries) {
3771 if (memcmp(key, &it->filter_info,
3772 sizeof(struct e1000_flex_filter_info)) == 0)
3773 return it;
3774 }
3776 return NULL;
3777 }
3780 eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
3781 struct rte_eth_flex_filter *filter,
3784 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3785 struct e1000_filter_info *filter_info =
3786 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3787 struct e1000_flex_filter *flex_filter, *it;
3788 uint32_t wufc, queueing, mask;
3790 uint8_t shift, i, j = 0;
3792 flex_filter = rte_zmalloc("e1000_flex_filter",
3793 sizeof(struct e1000_flex_filter), 0);
3794 if (flex_filter == NULL)
3795 return -ENOMEM;
3797 flex_filter->filter_info.len = filter->len;
3798 flex_filter->filter_info.priority = filter->priority;
3799 memcpy(flex_filter->filter_info.dwords, filter->bytes, filter->len);
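/*
 * Each mask byte covers eight filter bytes. The loop below swaps the
 * bit order within every mask byte (API bit 0 becomes hardware bit
 * 7), which is the ordering the FHFT mask registers expect.
 */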
3800 for (i = 0; i < RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT; i++) {
3801 mask = 0;
3802 /* reverse bits in flex filter's mask */
3803 for (shift = 0; shift < CHAR_BIT; shift++) {
3804 if (filter->mask[i] & (0x01 << shift))
3805 mask |= (0x80 >> shift);
3806 }
3807 flex_filter->filter_info.mask[i] = mask;
3808 }
3810 wufc = E1000_READ_REG(hw, E1000_WUFC);
3811 if (flex_filter->index < E1000_MAX_FHFT)
3812 reg_off = E1000_FHFT(flex_filter->index);
3813 else
3814 reg_off = E1000_FHFT_EXT(flex_filter->index - E1000_MAX_FHFT);
3816 if (add) {
3817 if (eth_igb_flex_filter_lookup(&filter_info->flex_list,
3818 &flex_filter->filter_info) != NULL) {
3819 PMD_DRV_LOG(ERR, "filter exists.");
3820 rte_free(flex_filter);
3821 return -EEXIST;
3822 }
3823 flex_filter->queue = filter->queue;
3825 * look for an unused flex filter index
3826 * and insert the filter into the list.
3828 for (i = 0; i < E1000_MAX_FLEX_FILTERS; i++) {
3829 if (!(filter_info->flex_mask & (1 << i))) {
3830 filter_info->flex_mask |= 1 << i;
3831 flex_filter->index = i;
3832 TAILQ_INSERT_TAIL(&filter_info->flex_list,
3833 flex_filter, entries);
3834 break;
3835 }
3836 }
3838 if (i >= E1000_MAX_FLEX_FILTERS) {
3839 PMD_DRV_LOG(ERR, "flex filters are full.");
3840 rte_free(flex_filter);
3841 return -ENOSYS;
3842 }
3844 E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
3845 (E1000_WUFC_FLX0 << flex_filter->index));
3846 queueing = filter->len |
3847 (filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
3848 (filter->priority << E1000_FHFT_QUEUEING_PRIO_SHIFT);
3849 E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET,
3851 for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) {
3852 E1000_WRITE_REG(hw, reg_off,
3853 flex_filter->filter_info.dwords[j]);
3854 reg_off += sizeof(uint32_t);
3855 E1000_WRITE_REG(hw, reg_off,
3856 flex_filter->filter_info.dwords[++j]);
3857 reg_off += sizeof(uint32_t);
3858 E1000_WRITE_REG(hw, reg_off,
3859 (uint32_t)flex_filter->filter_info.mask[i]);
3860 reg_off += sizeof(uint32_t) * 2;
3861 ++j;
3862 }
3863 } else {
3864 it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
3865 &flex_filter->filter_info);
3867 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3868 rte_free(flex_filter);
3872 for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
3873 E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);
3874 E1000_WRITE_REG(hw, E1000_WUFC, wufc &
3875 (~(E1000_WUFC_FLX0 << it->index)));
3877 filter_info->flex_mask &= ~(1 << it->index);
3878 TAILQ_REMOVE(&filter_info->flex_list, it, entries);
3880 rte_free(flex_filter);
3881 }
3883 return 0;
3884 }
3887 eth_igb_get_flex_filter(struct rte_eth_dev *dev,
3888 struct rte_eth_flex_filter *filter)
3890 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3891 struct e1000_filter_info *filter_info =
3892 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3893 struct e1000_flex_filter flex_filter, *it;
3894 uint32_t wufc, queueing, wufc_en = 0;
3896 memset(&flex_filter, 0, sizeof(struct e1000_flex_filter));
3897 flex_filter.filter_info.len = filter->len;
3898 flex_filter.filter_info.priority = filter->priority;
3899 memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len);
3900 memcpy(flex_filter.filter_info.mask, filter->mask,
3901 RTE_ALIGN(filter->len, sizeof(char)) / sizeof(char));
3903 it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
3904 &flex_filter.filter_info);
3906 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3910 wufc = E1000_READ_REG(hw, E1000_WUFC);
3911 wufc_en = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << it->index);
3913 if ((wufc & wufc_en) == wufc_en) {
3914 uint32_t reg_off = 0;
3915 if (it->index < E1000_MAX_FHFT)
3916 reg_off = E1000_FHFT(it->index);
3918 reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);
3920 queueing = E1000_READ_REG(hw,
3921 reg_off + E1000_FHFT_QUEUEING_OFFSET);
3922 filter->len = queueing & E1000_FHFT_QUEUEING_LEN;
3923 filter->priority = (queueing & E1000_FHFT_QUEUEING_PRIO) >>
3924 E1000_FHFT_QUEUEING_PRIO_SHIFT;
3925 filter->queue = (queueing & E1000_FHFT_QUEUEING_QUEUE) >>
3926 E1000_FHFT_QUEUEING_QUEUE_SHIFT;
3927 }
3929 return 0;
3930 }
3933 eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
3934 enum rte_filter_op filter_op,
3937 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3938 struct rte_eth_flex_filter *filter;
3941 MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
3943 if (filter_op == RTE_ETH_FILTER_NOP)
3944 return 0;
3946 if (arg == NULL) {
3947 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
3948 filter_op);
3949 return -EINVAL;
3950 }
3952 filter = (struct rte_eth_flex_filter *)arg;
3953 if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN
3954 || filter->len % sizeof(uint64_t) != 0) {
3955 PMD_DRV_LOG(ERR, "filter's length is out of range");
3958 if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
3959 PMD_DRV_LOG(ERR, "filter's priority is out of range");
3963 switch (filter_op) {
3964 case RTE_ETH_FILTER_ADD:
3965 ret = eth_igb_add_del_flex_filter(dev, filter, TRUE);
3966 break;
3967 case RTE_ETH_FILTER_DELETE:
3968 ret = eth_igb_add_del_flex_filter(dev, filter, FALSE);
3969 break;
3970 case RTE_ETH_FILTER_GET:
3971 ret = eth_igb_get_flex_filter(dev, filter);
3972 break;
3973 default:
3974 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
3975 ret = -EINVAL;
3976 break;
3977 }
3979 return ret;
3980 }
3982 /* translate elements in struct rte_eth_ntuple_filter to struct e1000_5tuple_filter_info*/
3984 ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter,
3985 struct e1000_5tuple_filter_info *filter_info)
3987 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576)
3988 return -EINVAL;
3989 if (filter->priority > E1000_2TUPLE_MAX_PRI)
3990 return -EINVAL; /* priority is out of range. */
3991 if (filter->tcp_flags > TCP_FLAG_ALL)
3992 return -EINVAL; /* flags are invalid. */
3994 switch (filter->dst_ip_mask) {
3995 case UINT32_MAX:
3996 filter_info->dst_ip_mask = 0;
3997 filter_info->dst_ip = filter->dst_ip;
3998 break;
3999 case 0:
4000 filter_info->dst_ip_mask = 1;
4001 break;
4002 default:
4003 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
4004 return -EINVAL;
4005 }
4007 switch (filter->src_ip_mask) {
4008 case UINT32_MAX:
4009 filter_info->src_ip_mask = 0;
4010 filter_info->src_ip = filter->src_ip;
4011 break;
4012 case 0:
4013 filter_info->src_ip_mask = 1;
4014 break;
4015 default:
4016 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
4017 return -EINVAL;
4018 }
4020 switch (filter->dst_port_mask) {
4021 case UINT16_MAX:
4022 filter_info->dst_port_mask = 0;
4023 filter_info->dst_port = filter->dst_port;
4024 break;
4025 case 0:
4026 filter_info->dst_port_mask = 1;
4027 break;
4028 default:
4029 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
4030 return -EINVAL;
4031 }
4033 switch (filter->src_port_mask) {
4034 case UINT16_MAX:
4035 filter_info->src_port_mask = 0;
4036 filter_info->src_port = filter->src_port;
4037 break;
4038 case 0:
4039 filter_info->src_port_mask = 1;
4040 break;
4041 default:
4042 PMD_DRV_LOG(ERR, "invalid src_port mask.");
4043 return -EINVAL;
4044 }
4046 switch (filter->proto_mask) {
4047 case UINT8_MAX:
4048 filter_info->proto_mask = 0;
4049 filter_info->proto = filter->proto;
4050 break;
4051 case 0:
4052 filter_info->proto_mask = 1;
4053 break;
4054 default:
4055 PMD_DRV_LOG(ERR, "invalid protocol mask.");
4056 return -EINVAL;
4057 }
4059 filter_info->priority = (uint8_t)filter->priority;
4060 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
4061 filter_info->tcp_flags = filter->tcp_flags;
4062 else
4063 filter_info->tcp_flags = 0;
4065 return 0;
4066 }
4068 static inline struct e1000_5tuple_filter *
4069 igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list,
4070 struct e1000_5tuple_filter_info *key)
4072 struct e1000_5tuple_filter *it;
4074 TAILQ_FOREACH(it, filter_list, entries) {
4075 if (memcmp(key, &it->filter_info,
4076 sizeof(struct e1000_5tuple_filter_info)) == 0) {
4077 return it;
4078 }
4079 }
4080 return NULL;
4081 }
4083 /*
4084 * igb_add_5tuple_filter_82576 - add a 5tuple filter
4087 * dev: Pointer to struct rte_eth_dev.
4088 * ntuple_filter: pointer to the filter that will be added.
4091 * - On success, zero.
4092 * - On failure, a negative value.
4093 */
4094 static int
4095 igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
4096 struct rte_eth_ntuple_filter *ntuple_filter)
4098 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4099 struct e1000_filter_info *filter_info =
4100 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4101 struct e1000_5tuple_filter *filter;
4102 uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK;
4103 uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP;
4104 uint8_t i;
4105 int ret;
4107 filter = rte_zmalloc("e1000_5tuple_filter",
4108 sizeof(struct e1000_5tuple_filter), 0);
4109 if (filter == NULL)
4110 return -ENOMEM;
4112 ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
4113 &filter->filter_info);
4114 if (ret < 0) {
4115 rte_free(filter);
4116 return ret;
4117 }
4119 if (igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
4120 &filter->filter_info) != NULL) {
4121 PMD_DRV_LOG(ERR, "filter exists.");
4122 rte_free(filter);
4123 return -EEXIST;
4124 }
4125 filter->queue = ntuple_filter->queue;
4128 * look for an unused 5tuple filter index,
4129 * and insert the filter to list.
4130 */
4131 for (i = 0; i < E1000_MAX_FTQF_FILTERS; i++) {
4132 if (!(filter_info->fivetuple_mask & (1 << i))) {
4133 filter_info->fivetuple_mask |= 1 << i;
4134 filter->index = i;
4135 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
4136 filter, entries);
4137 break;
4138 }
4139 }
4141 if (i >= E1000_MAX_FTQF_FILTERS) {
4142 PMD_DRV_LOG(ERR, "5tuple filters are full.");
4143 rte_free(filter);
4144 return -ENOSYS;
4145 }
4147 ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK;
4148 if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. */
4149 ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP;
4150 if (filter->filter_info.dst_ip_mask == 0)
4151 ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP;
4152 if (filter->filter_info.src_port_mask == 0)
4153 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
4154 if (filter->filter_info.proto_mask == 0)
4155 ftqf &= ~E1000_FTQF_MASK_PROTO_BP;
4156 ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) &
4157 E1000_FTQF_QUEUE_MASK;
4158 ftqf |= E1000_FTQF_QUEUE_ENABLE;
4159 E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf);
4160 E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip);
4161 E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip);
4163 spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT;
4164 E1000_WRITE_REG(hw, E1000_SPQF(i), spqf);
4166 imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
4167 if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
4168 imir |= E1000_IMIR_PORT_BP;
4169 else
4170 imir &= ~E1000_IMIR_PORT_BP;
4171 imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
4173 /* tcp flags bits setting. */
4174 if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
4175 if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
4176 imir_ext |= E1000_IMIREXT_CTRL_URG;
4177 if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
4178 imir_ext |= E1000_IMIREXT_CTRL_ACK;
4179 if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
4180 imir_ext |= E1000_IMIREXT_CTRL_PSH;
4181 if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
4182 imir_ext |= E1000_IMIREXT_CTRL_RST;
4183 if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
4184 imir_ext |= E1000_IMIREXT_CTRL_SYN;
4185 if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
4186 imir_ext |= E1000_IMIREXT_CTRL_FIN;
4187 } else
4188 imir_ext |= E1000_IMIREXT_CTRL_BP;
4189 E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
4190 E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
4192 return 0;
4193 }
4194 /*
4195 * igb_remove_5tuple_filter_82576 - remove a 5tuple filter
4198 * dev: Pointer to struct rte_eth_dev.
4199 * ntuple_filter: pointer to the filter that will be removed.
4202 * - On success, zero.
4203 * - On failure, a negative value.
4204 */
4205 static int
4206 igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
4207 struct rte_eth_ntuple_filter *ntuple_filter)
4209 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4210 struct e1000_filter_info *filter_info =
4211 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4212 struct e1000_5tuple_filter_info filter_5tuple;
4213 struct e1000_5tuple_filter *filter;
4214 int ret;
4216 memset(&filter_5tuple, 0, sizeof(struct e1000_5tuple_filter_info));
4217 ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
4218 &filter_5tuple);
4219 if (ret < 0)
4220 return ret;
4222 filter = igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
4223 &filter_5tuple);
4224 if (filter == NULL) {
4225 PMD_DRV_LOG(ERR, "filter doesn't exist.");
4226 return -ENOENT;
4227 }
4229 filter_info->fivetuple_mask &= ~(1 << filter->index);
4230 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
4233 E1000_WRITE_REG(hw, E1000_FTQF(filter->index),
4234 E1000_FTQF_VF_BP | E1000_FTQF_MASK);
4235 E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0);
4236 E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0);
4237 E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0);
4238 E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
4239 E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
4241 return 0;
4242 }
4243 static int
4244 eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
4245 {
4246 uint32_t rctl;
4247 struct e1000_hw *hw;
4248 struct rte_eth_dev_info dev_info;
4249 uint32_t frame_size = mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN +
4250 VLAN_TAG_SIZE);
4252 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4254 #ifdef RTE_LIBRTE_82571_SUPPORT
4255 /* XXX: not bigger than max_rx_pktlen */
4256 if (hw->mac.type == e1000_82571)
4257 return -ENOTSUP;
4258 #endif
4259 eth_igb_infos_get(dev, &dev_info);
4261 /* check that mtu is within the allowed range */
4262 if ((mtu < ETHER_MIN_MTU) ||
4263 (frame_size > dev_info.max_rx_pktlen))
4264 return -EINVAL;
4266 /* Refuse an MTU that requires scattered-packet support when that
4267 * feature has not been enabled beforehand. */
4268 if (!dev->data->scattered_rx &&
4269 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
4270 return -EINVAL;
4272 rctl = E1000_READ_REG(hw, E1000_RCTL);
4274 /* switch to jumbo mode if needed */
4275 if (frame_size > ETHER_MAX_LEN) {
4276 dev->data->dev_conf.rxmode.jumbo_frame = 1;
4277 rctl |= E1000_RCTL_LPE;
4278 } else {
4279 dev->data->dev_conf.rxmode.jumbo_frame = 0;
4280 rctl &= ~E1000_RCTL_LPE;
4281 }
4282 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
4284 /* update max frame size */
4285 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
4287 E1000_WRITE_REG(hw, E1000_RLPML,
4288 dev->data->dev_conf.rxmode.max_rx_pkt_len);
4290 return 0;
4291 }
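/*
 * Editor's note: illustrative, application-side sketch (not part of the
 * driver). An application normally reaches eth_igb_mtu_set() through the
 * generic MTU API; the port id and MTU value below are hypothetical. A
 * jumbo MTU only succeeds if the Rx buffers are large enough or scattered
 * Rx has already been enabled.
 */
static int
example_set_jumbo_mtu(uint8_t port_id)
{
	return rte_eth_dev_set_mtu(port_id, 9000); /* hypothetical jumbo MTU */
}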
4293 /*
4294 * igb_add_del_ntuple_filter - add or delete a ntuple filter
4297 * dev: Pointer to struct rte_eth_dev.
4298 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
4299 * add: if true, add filter, if false, remove filter
4302 * - On success, zero.
4303 * - On failure, a negative value.
4304 */
4305 static int
4306 igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
4307 struct rte_eth_ntuple_filter *ntuple_filter,
4308 bool add)
4309 {
4310 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4311 int ret;
4313 switch (ntuple_filter->flags) {
4314 case RTE_5TUPLE_FLAGS:
4315 case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
4316 if (hw->mac.type != e1000_82576)
4317 return -ENOTSUP;
4318 if (add)
4319 ret = igb_add_5tuple_filter_82576(dev,
4320 ntuple_filter);
4321 else
4322 ret = igb_remove_5tuple_filter_82576(dev,
4323 ntuple_filter);
4324 break;
4325 case RTE_2TUPLE_FLAGS:
4326 case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
4327 if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
4328 return -ENOTSUP;
4329 if (add)
4330 ret = igb_add_2tuple_filter(dev, ntuple_filter);
4331 else
4332 ret = igb_remove_2tuple_filter(dev, ntuple_filter);
4333 break;
4334 default:
4335 ret = -EINVAL;
4336 break;
4337 }
4339 return ret;
4340 }
4342 /*
4343 * igb_get_ntuple_filter - get a ntuple filter
4346 * dev: Pointer to struct rte_eth_dev.
4347 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
4350 * - On success, zero.
4351 * - On failure, a negative value.
4352 */
4353 static int
4354 igb_get_ntuple_filter(struct rte_eth_dev *dev,
4355 struct rte_eth_ntuple_filter *ntuple_filter)
4357 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4358 struct e1000_filter_info *filter_info =
4359 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4360 struct e1000_5tuple_filter_info filter_5tuple;
4361 struct e1000_2tuple_filter_info filter_2tuple;
4362 struct e1000_5tuple_filter *p_5tuple_filter;
4363 struct e1000_2tuple_filter *p_2tuple_filter;
4364 int ret;
4366 switch (ntuple_filter->flags) {
4367 case RTE_5TUPLE_FLAGS:
4368 case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
4369 if (hw->mac.type != e1000_82576)
4370 return -ENOTSUP;
4371 memset(&filter_5tuple,
4372 0,
4373 sizeof(struct e1000_5tuple_filter_info));
4374 ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
4375 &filter_5tuple);
4376 if (ret < 0)
4377 return ret;
4378 p_5tuple_filter = igb_5tuple_filter_lookup_82576(
4379 &filter_info->fivetuple_list,
4380 &filter_5tuple);
4381 if (p_5tuple_filter == NULL) {
4382 PMD_DRV_LOG(ERR, "filter doesn't exist.");
4383 return -ENOENT;
4384 }
4385 ntuple_filter->queue = p_5tuple_filter->queue;
4386 break;
4387 case RTE_2TUPLE_FLAGS:
4388 case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
4389 if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
4390 return -ENOTSUP;
4391 memset(&filter_2tuple,
4392 0,
4393 sizeof(struct e1000_2tuple_filter_info));
4394 ret = ntuple_filter_to_2tuple(ntuple_filter, &filter_2tuple);
4395 if (ret < 0)
4396 return ret;
4397 p_2tuple_filter = igb_2tuple_filter_lookup(
4398 &filter_info->twotuple_list,
4399 &filter_2tuple);
4400 if (p_2tuple_filter == NULL) {
4401 PMD_DRV_LOG(ERR, "filter doesn't exist.");
4402 return -ENOENT;
4403 }
4404 ntuple_filter->queue = p_2tuple_filter->queue;
4405 break;
4406 default:
4407 ret = -EINVAL;
4408 break;
4409 }
4411 return 0;
4412 }
4414 /*
4415 * igb_ntuple_filter_handle - Handle operations for ntuple filter.
4416 * @dev: pointer to rte_eth_dev structure
4417 * @filter_op: operation to be taken.
4418 * @arg: a pointer to specific structure corresponding to the filter_op
4419 */
4420 static int
4421 igb_ntuple_filter_handle(struct rte_eth_dev *dev,
4422 enum rte_filter_op filter_op,
4423 void *arg)
4424 {
4425 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4426 int ret;
4428 MAC_TYPE_FILTER_SUP(hw->mac.type);
4430 if (filter_op == RTE_ETH_FILTER_NOP)
4431 return 0;
4433 if (arg == NULL) {
4434 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
4435 filter_op);
4436 return -EINVAL;
4437 }
4439 switch (filter_op) {
4440 case RTE_ETH_FILTER_ADD:
4441 ret = igb_add_del_ntuple_filter(dev,
4442 (struct rte_eth_ntuple_filter *)arg,
4443 TRUE);
4444 break;
4445 case RTE_ETH_FILTER_DELETE:
4446 ret = igb_add_del_ntuple_filter(dev,
4447 (struct rte_eth_ntuple_filter *)arg,
4448 FALSE);
4449 break;
4450 case RTE_ETH_FILTER_GET:
4451 ret = igb_get_ntuple_filter(dev,
4452 (struct rte_eth_ntuple_filter *)arg);
4453 break;
4454 default:
4455 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
4456 ret = -EINVAL;
4457 break;
4458 }
4460 return ret;
4461 }
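/*
 * Editor's note: illustrative, application-side sketch (not part of the
 * driver). It builds a 5-tuple filter as consumed by the handler above:
 * a mask of UINT32_MAX/UINT16_MAX/UINT8_MAX means "compare this field",
 * 0 means "ignore it". The address, port, queue and port id below are
 * hypothetical.
 */
static int
example_add_5tuple_filter(uint8_t port_id)
{
	struct rte_eth_ntuple_filter nf;

	memset(&nf, 0, sizeof(nf));
	nf.flags = RTE_5TUPLE_FLAGS;
	nf.dst_ip = rte_cpu_to_be_32(0xC0A80101); /* 192.168.1.1 */
	nf.dst_ip_mask = UINT32_MAX;              /* compare dst address */
	nf.src_ip_mask = 0;                       /* ignore src address */
	nf.dst_port = rte_cpu_to_be_16(80);
	nf.dst_port_mask = UINT16_MAX;            /* compare dst port */
	nf.src_port_mask = 0;                     /* ignore src port */
	nf.proto = 6;                             /* TCP */
	nf.proto_mask = UINT8_MAX;                /* compare protocol */
	nf.priority = 1;
	nf.queue = 1;                             /* hypothetical Rx queue */

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
				       RTE_ETH_FILTER_ADD, &nf);
}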
4462 static int
4463 igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info,
4464 uint16_t ethertype)
4465 {
4466 int i;
4468 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
4469 if (filter_info->ethertype_filters[i] == ethertype &&
4470 (filter_info->ethertype_mask & (1 << i)))
4471 return i;
4472 }
4473 return -1;
4474 }
4476 static int
4477 igb_ethertype_filter_insert(struct e1000_filter_info *filter_info,
4478 uint16_t ethertype)
4479 {
4480 int i;
4482 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
4483 if (!(filter_info->ethertype_mask & (1 << i))) {
4484 filter_info->ethertype_mask |= 1 << i;
4485 filter_info->ethertype_filters[i] = ethertype;
4486 return i;
4487 }
4488 }
4489 return -1;
4490 }
4492 static int
4493 igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
4494 uint8_t idx)
4495 {
4496 if (idx >= E1000_MAX_ETQF_FILTERS)
4497 return -1;
4498 filter_info->ethertype_mask &= ~(1 << idx);
4499 filter_info->ethertype_filters[idx] = 0;
4500 return idx;
4501 }
4504 static int
4505 igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
4506 struct rte_eth_ethertype_filter *filter,
4507 bool add)
4508 {
4509 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4510 struct e1000_filter_info *filter_info =
4511 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4512 uint32_t etqf = 0;
4513 int ret;
4515 if (filter->ether_type == ETHER_TYPE_IPv4 ||
4516 filter->ether_type == ETHER_TYPE_IPv6) {
4517 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
4518 " ethertype filter.", filter->ether_type);
4522 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
4523 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
4526 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
4527 PMD_DRV_LOG(ERR, "drop option is unsupported.");
4531 ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
4532 if (ret >= 0 && add) {
4533 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
4534 filter->ether_type);
4535 return -EEXIST;
4536 }
4537 if (ret < 0 && !add) {
4538 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
4539 filter->ether_type);
4540 return -ENOENT;
4541 }
4543 if (add) {
4544 ret = igb_ethertype_filter_insert(filter_info,
4545 filter->ether_type);
4546 if (ret < 0) {
4547 PMD_DRV_LOG(ERR, "ethertype filters are full.");
4548 return -ENOSYS;
4549 }
4551 etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
4552 etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE);
4553 etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT;
4554 } else {
4555 ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret);
4556 if (ret < 0)
4557 return -ENOSYS;
4558 }
4559 E1000_WRITE_REG(hw, E1000_ETQF(ret), etqf);
4560 E1000_WRITE_FLUSH(hw);
4562 return 0;
4563 }
4565 static int
4566 igb_get_ethertype_filter(struct rte_eth_dev *dev,
4567 struct rte_eth_ethertype_filter *filter)
4569 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4570 struct e1000_filter_info *filter_info =
4571 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4572 uint32_t etqf;
4573 int ret;
4575 ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
4576 if (ret < 0) {
4577 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
4578 filter->ether_type);
4579 return -ENOENT;
4580 }
4582 etqf = E1000_READ_REG(hw, E1000_ETQF(ret));
4583 if (etqf & E1000_ETQF_FILTER_ENABLE) {
4584 filter->ether_type = etqf & E1000_ETQF_ETHERTYPE;
4585 filter->flags = 0;
4586 filter->queue = (etqf & E1000_ETQF_QUEUE) >>
4587 E1000_ETQF_QUEUE_SHIFT;
4588 }
4590 return 0;
4591 }
4594 /*
4595 * igb_ethertype_filter_handle - Handle operations for ethertype filter.
4596 * @dev: pointer to rte_eth_dev structure
4597 * @filter_op: operation to be taken.
4598 * @arg: a pointer to specific structure corresponding to the filter_op
4599 */
4600 static int
4601 igb_ethertype_filter_handle(struct rte_eth_dev *dev,
4602 enum rte_filter_op filter_op,
4603 void *arg)
4604 {
4605 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4606 int ret;
4608 MAC_TYPE_FILTER_SUP(hw->mac.type);
4610 if (filter_op == RTE_ETH_FILTER_NOP)
4611 return 0;
4613 if (arg == NULL) {
4614 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
4615 filter_op);
4616 return -EINVAL;
4617 }
4619 switch (filter_op) {
4620 case RTE_ETH_FILTER_ADD:
4621 ret = igb_add_del_ethertype_filter(dev,
4622 (struct rte_eth_ethertype_filter *)arg,
4623 TRUE);
4624 break;
4625 case RTE_ETH_FILTER_DELETE:
4626 ret = igb_add_del_ethertype_filter(dev,
4627 (struct rte_eth_ethertype_filter *)arg,
4628 FALSE);
4629 break;
4630 case RTE_ETH_FILTER_GET:
4631 ret = igb_get_ethertype_filter(dev,
4632 (struct rte_eth_ethertype_filter *)arg);
4633 break;
4634 default:
4635 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
4636 ret = -EINVAL;
4637 break;
4638 }
4640 return ret;
4641 }
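/*
 * Editor's note: illustrative, application-side sketch (not part of the
 * driver). It steers one EtherType to an Rx queue via the handler above;
 * 0x88F7 (PTP over Ethernet) and the queue are hypothetical values. IPv4
 * and IPv6 EtherTypes are rejected by igb_add_del_ethertype_filter().
 */
static int
example_add_ethertype_filter(uint8_t port_id)
{
	struct rte_eth_ethertype_filter ef;

	memset(&ef, 0, sizeof(ef));
	ef.ether_type = 0x88F7; /* hypothetical: PTP over Ethernet */
	ef.flags = 0;           /* no MAC compare, no drop (both unsupported) */
	ef.queue = 1;           /* hypothetical Rx queue */

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
				       RTE_ETH_FILTER_ADD, &ef);
}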
4642 static int
4643 eth_igb_filter_ctrl(struct rte_eth_dev *dev,
4644 enum rte_filter_type filter_type,
4645 enum rte_filter_op filter_op,
4646 void *arg)
4647 {
4648 int ret = 0;
4650 switch (filter_type) {
4651 case RTE_ETH_FILTER_NTUPLE:
4652 ret = igb_ntuple_filter_handle(dev, filter_op, arg);
4653 break;
4654 case RTE_ETH_FILTER_ETHERTYPE:
4655 ret = igb_ethertype_filter_handle(dev, filter_op, arg);
4656 break;
4657 case RTE_ETH_FILTER_SYN:
4658 ret = eth_igb_syn_filter_handle(dev, filter_op, arg);
4659 break;
4660 case RTE_ETH_FILTER_FLEXIBLE:
4661 ret = eth_igb_flex_filter_handle(dev, filter_op, arg);
4662 break;
4663 default:
4664 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
4665 filter_type);
4666 break;
4667 }
4669 return ret;
4670 }
4672 static int
4673 eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
4674 struct ether_addr *mc_addr_set,
4675 uint32_t nb_mc_addr)
4676 {
4677 struct e1000_hw *hw;
4679 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4680 e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
4681 return 0;
4682 }
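/*
 * Editor's note: illustrative, application-side sketch (not part of the
 * driver). The multicast whitelist is handed to the base code unmodified,
 * so the application only builds an array of ether_addr; the addresses and
 * port id below are hypothetical.
 */
static int
example_set_mc_list(uint8_t port_id)
{
	static struct ether_addr mc[] = {
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb } },
	};

	return rte_eth_dev_set_mc_addr_list(port_id, mc, RTE_DIM(mc));
}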
4684 static uint64_t
4685 igb_read_systime_cyclecounter(struct rte_eth_dev *dev)
4686 {
4687 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4688 uint64_t systime_cycles;
4690 switch (hw->mac.type) {
4691 case e1000_i210:
4692 case e1000_i211:
4693 /*
4694 * Need to read System Time Residue Register to be able
4695 * to read the other two registers.
4696 */
4697 E1000_READ_REG(hw, E1000_SYSTIMR);
4698 /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */
4699 systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
4700 systime_cycles += (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH)
4701 * NSEC_PER_SEC;
4702 break;
4703 case e1000_82580:
4704 case e1000_i350:
4705 case e1000_i354:
4706 /*
4707 * Need to read System Time Residue Register to be able
4708 * to read the other two registers.
4709 */
4710 E1000_READ_REG(hw, E1000_SYSTIMR);
4711 systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
4712 /* Only the 8 LSB are valid. */
4713 systime_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_SYSTIMH)
4714 & 0xff) << 32;
4715 break;
4716 default:
4717 systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
4718 systime_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH)
4719 << 32;
4720 break;
4721 }
4723 return systime_cycles;
4724 }
4726 static uint64_t
4727 igb_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4728 {
4729 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4730 uint64_t rx_tstamp_cycles;
4732 switch (hw->mac.type) {
4733 case e1000_i210:
4734 case e1000_i211:
4735 /* RXSTMPL stores ns and RXSTMPH stores seconds. */
4736 rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
4737 rx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH)
4738 * NSEC_PER_SEC;
4739 break;
4740 case e1000_82580:
4741 case e1000_i350:
4742 case e1000_i354:
4743 rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
4744 /* Only the 8 LSB are valid. */
4745 rx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_RXSTMPH)
4746 & 0xff) << 32;
4747 break;
4748 default:
4749 rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
4750 rx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH)
4751 << 32;
4752 break;
4753 }
4755 return rx_tstamp_cycles;
4756 }
4758 static uint64_t
4759 igb_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4760 {
4761 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4762 uint64_t tx_tstamp_cycles;
4764 switch (hw->mac.type) {
4765 case e1000_i210:
4766 case e1000_i211:
4767 /* TXSTMPL stores ns and TXSTMPH stores seconds. */
4768 tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
4769 tx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH)
4770 * NSEC_PER_SEC;
4771 break;
4772 case e1000_82580:
4773 case e1000_i350:
4774 case e1000_i354:
4775 tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
4776 /* Only the 8 LSB are valid. */
4777 tx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_TXSTMPH)
4778 & 0xff) << 32;
4779 break;
4780 default:
4781 tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
4782 tx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH)
4783 << 32;
4784 break;
4785 }
4787 return tx_tstamp_cycles;
4788 }
4790 static void
4791 igb_start_timecounters(struct rte_eth_dev *dev)
4793 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4794 struct e1000_adapter *adapter =
4795 (struct e1000_adapter *)dev->data->dev_private;
4796 uint32_t incval = 1;
4797 uint32_t shift = 0;
4798 uint64_t mask = E1000_CYCLECOUNTER_MASK;
4800 switch (hw->mac.type) {
4801 case e1000_82580:
4802 case e1000_i350:
4803 case e1000_i354:
4804 /* 32 LSB bits + 8 MSB bits = 40 bits */
4805 mask = (1ULL << 40) - 1;
4806 /* fall-through */
4807 case e1000_i210:
4808 case e1000_i211:
4809 /*
4810 * Start incrementing the register
4811 * used to timestamp PTP packets.
4812 */
4813 E1000_WRITE_REG(hw, E1000_TIMINCA, incval);
4814 break;
4815 case e1000_82576:
4816 incval = E1000_INCVALUE_82576;
4817 shift = IGB_82576_TSYNC_SHIFT;
4818 E1000_WRITE_REG(hw, E1000_TIMINCA,
4819 E1000_INCPERIOD_82576 | incval);
4820 break;
4821 default:
4822 /* Not supported. */
4823 return;
4824 }
4826 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
4827 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
4828 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
4830 adapter->systime_tc.cc_mask = mask;
4831 adapter->systime_tc.cc_shift = shift;
4832 adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
4834 adapter->rx_tstamp_tc.cc_mask = mask;
4835 adapter->rx_tstamp_tc.cc_shift = shift;
4836 adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
4838 adapter->tx_tstamp_tc.cc_mask = mask;
4839 adapter->tx_tstamp_tc.cc_shift = shift;
4840 adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
4841 }
4843 static int
4844 igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
4845 {
4846 struct e1000_adapter *adapter =
4847 (struct e1000_adapter *)dev->data->dev_private;
4849 adapter->systime_tc.nsec += delta;
4850 adapter->rx_tstamp_tc.nsec += delta;
4851 adapter->tx_tstamp_tc.nsec += delta;
4853 return 0;
4854 }
4856 static int
4857 igb_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
4858 {
4859 uint64_t ns;
4860 struct e1000_adapter *adapter =
4861 (struct e1000_adapter *)dev->data->dev_private;
4863 ns = rte_timespec_to_ns(ts);
4865 /* Set the timecounters to a new value. */
4866 adapter->systime_tc.nsec = ns;
4867 adapter->rx_tstamp_tc.nsec = ns;
4868 adapter->tx_tstamp_tc.nsec = ns;
4870 return 0;
4871 }
4873 static int
4874 igb_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
4875 {
4876 uint64_t ns, systime_cycles;
4877 struct e1000_adapter *adapter =
4878 (struct e1000_adapter *)dev->data->dev_private;
4880 systime_cycles = igb_read_systime_cyclecounter(dev);
4881 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
4882 *ts = rte_ns_to_timespec(ns);
4884 return 0;
4885 }
4887 static int
4888 igb_timesync_enable(struct rte_eth_dev *dev)
4889 {
4890 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4891 uint32_t tsync_ctl;
4892 uint32_t tsauxc;
4894 /* Stop the timesync system time. */
4895 E1000_WRITE_REG(hw, E1000_TIMINCA, 0x0);
4896 /* Reset the timesync system time value. */
4897 switch (hw->mac.type) {
4898 case e1000_82580:
4899 case e1000_i350:
4900 case e1000_i354:
4901 case e1000_i210:
4902 case e1000_i211:
4903 E1000_WRITE_REG(hw, E1000_SYSTIMR, 0x0);
4904 /* fall-through */
4905 case e1000_82576:
4906 E1000_WRITE_REG(hw, E1000_SYSTIML, 0x0);
4907 E1000_WRITE_REG(hw, E1000_SYSTIMH, 0x0);
4908 break;
4909 default:
4910 /* Not supported. */
4911 return -ENOTSUP;
4912 }
4914 /* Enable system time, since it isn't enabled by default. */
4915 tsauxc = E1000_READ_REG(hw, E1000_TSAUXC);
4916 tsauxc &= ~E1000_TSAUXC_DISABLE_SYSTIME;
4917 E1000_WRITE_REG(hw, E1000_TSAUXC, tsauxc);
4919 igb_start_timecounters(dev);
4921 /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
4922 E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588),
4923 (ETHER_TYPE_1588 |
4924 E1000_ETQF_FILTER_ENABLE |
4925 E1000_ETQF_1588));
4927 /* Enable timestamping of received PTP packets. */
4928 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
4929 tsync_ctl |= E1000_TSYNCRXCTL_ENABLED;
4930 E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);
4932 /* Enable Timestamping of transmitted PTP packets. */
4933 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
4934 tsync_ctl |= E1000_TSYNCTXCTL_ENABLED;
4935 E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);
4937 return 0;
4938 }
4940 static int
4941 igb_timesync_disable(struct rte_eth_dev *dev)
4942 {
4943 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4944 uint32_t tsync_ctl;
4946 /* Disable timestamping of transmitted PTP packets. */
4947 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
4948 tsync_ctl &= ~E1000_TSYNCTXCTL_ENABLED;
4949 E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);
4951 /* Disable timestamping of received PTP packets. */
4952 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
4953 tsync_ctl &= ~E1000_TSYNCRXCTL_ENABLED;
4954 E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);
4956 /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
4957 E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), 0);
4959 /* Stop incrementing the System Time registers. */
4960 E1000_WRITE_REG(hw, E1000_TIMINCA, 0);
4962 return 0;
4963 }
4965 static int
4966 igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
4967 struct timespec *timestamp,
4968 uint32_t flags __rte_unused)
4970 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4971 struct e1000_adapter *adapter =
4972 (struct e1000_adapter *)dev->data->dev_private;
4973 uint32_t tsync_rxctl;
4974 uint64_t rx_tstamp_cycles;
4975 uint64_t ns;
4977 tsync_rxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
4978 if ((tsync_rxctl & E1000_TSYNCRXCTL_VALID) == 0)
4979 return -EINVAL;
4981 rx_tstamp_cycles = igb_read_rx_tstamp_cyclecounter(dev);
4982 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
4983 *timestamp = rte_ns_to_timespec(ns);
4985 return 0;
4986 }
4988 static int
4989 igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
4990 struct timespec *timestamp)
4991 {
4992 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4993 struct e1000_adapter *adapter =
4994 (struct e1000_adapter *)dev->data->dev_private;
4995 uint32_t tsync_txctl;
4996 uint64_t tx_tstamp_cycles;
4997 uint64_t ns;
4999 tsync_txctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
5000 if ((tsync_txctl & E1000_TSYNCTXCTL_VALID) == 0)
5001 return -EINVAL;
5003 tx_tstamp_cycles = igb_read_tx_tstamp_cyclecounter(dev);
5004 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
5005 *timestamp = rte_ns_to_timespec(ns);
5007 return 0;
5008 }
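/*
 * Editor's note: illustrative, application-side sketch (not part of the
 * driver). It shows the intended calling sequence for the timesync hooks
 * above: enable timestamping, poll for an Rx timestamp, then gently slew
 * the local clock. The port id and the +1 us correction are hypothetical.
 */
static void
example_ptp_poll(uint8_t port_id)
{
	struct timespec rx_ts;

	rte_eth_timesync_enable(port_id);

	/* After receiving a PTP frame (PKT_RX_IEEE1588_PTP in ol_flags): */
	if (rte_eth_timesync_read_rx_timestamp(port_id, &rx_ts, 0) == 0) {
		/* ... compare rx_ts against the master clock ... */
		rte_eth_timesync_adjust_time(port_id, 1000); /* +1 us */
	}

	rte_eth_timesync_disable(port_id);
}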
5010 static int
5011 eth_igb_get_reg_length(struct rte_eth_dev *dev __rte_unused)
5012 {
5013 int count = 0;
5014 int g_ind = 0;
5015 const struct reg_info *reg_group;
5017 while ((reg_group = igb_regs[g_ind++]))
5018 count += igb_reg_group_count(reg_group);
5020 return count;
5021 }
5023 static int
5024 igbvf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
5025 {
5026 int count = 0;
5027 int g_ind = 0;
5028 const struct reg_info *reg_group;
5030 while ((reg_group = igbvf_regs[g_ind++]))
5031 count += igb_reg_group_count(reg_group);
5033 return count;
5034 }
5036 static int
5037 eth_igb_get_regs(struct rte_eth_dev *dev,
5038 struct rte_dev_reg_info *regs)
5040 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5041 uint32_t *data = regs->data;
5042 int g_ind = 0;
5043 int count = 0;
5044 const struct reg_info *reg_group;
5046 if (data == NULL) {
5047 regs->length = eth_igb_get_reg_length(dev);
5048 regs->width = sizeof(uint32_t);
5049 return 0;
5050 }
5052 /* Support only full register dump */
5053 if ((regs->length == 0) ||
5054 (regs->length == (uint32_t)eth_igb_get_reg_length(dev))) {
5055 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
5056 hw->device_id;
5057 while ((reg_group = igb_regs[g_ind++]))
5058 count += igb_read_regs_group(dev, &data[count],
5059 reg_group);
5060 return 0;
5061 }
5063 return -ENOTSUP;
5064 }
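/*
 * Editor's note: illustrative, application-side sketch (not part of the
 * driver). A register dump is a two-step call: first query the length with
 * data == NULL, then fetch the full dump. The port id is hypothetical.
 */
static int
example_dump_regs(uint8_t port_id)
{
	struct rte_dev_reg_info info;
	int ret;

	memset(&info, 0, sizeof(info));
	ret = rte_eth_dev_get_reg_info(port_id, &info); /* length query */
	if (ret != 0)
		return ret;

	info.data = rte_zmalloc("reg_dump",
				info.length * sizeof(uint32_t), 0);
	if (info.data == NULL)
		return -ENOMEM;

	ret = rte_eth_dev_get_reg_info(port_id, &info); /* full dump */
	rte_free(info.data);
	return ret;
}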
5066 static int
5067 igbvf_get_regs(struct rte_eth_dev *dev,
5068 struct rte_dev_reg_info *regs)
5070 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5071 uint32_t *data = regs->data;
5072 int g_ind = 0;
5073 int count = 0;
5074 const struct reg_info *reg_group;
5076 if (data == NULL) {
5077 regs->length = igbvf_get_reg_length(dev);
5078 regs->width = sizeof(uint32_t);
5079 return 0;
5080 }
5082 /* Support only full register dump */
5083 if ((regs->length == 0) ||
5084 (regs->length == (uint32_t)igbvf_get_reg_length(dev))) {
5085 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
5086 hw->device_id;
5087 while ((reg_group = igbvf_regs[g_ind++]))
5088 count += igb_read_regs_group(dev, &data[count],
5089 reg_group);
5090 return 0;
5091 }
5093 return -ENOTSUP;
5094 }
5096 static int
5097 eth_igb_get_eeprom_length(struct rte_eth_dev *dev)
5099 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5101 /* Return unit is byte count */
5102 return hw->nvm.word_size * 2;
5103 }
5105 static int
5106 eth_igb_get_eeprom(struct rte_eth_dev *dev,
5107 struct rte_dev_eeprom_info *in_eeprom)
5109 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5110 struct e1000_nvm_info *nvm = &hw->nvm;
5111 uint16_t *data = in_eeprom->data;
5112 int first, length;
5114 first = in_eeprom->offset >> 1;
5115 length = in_eeprom->length >> 1;
5116 if ((first >= hw->nvm.word_size) ||
5117 ((first + length) >= hw->nvm.word_size))
5118 return -EINVAL;
5120 in_eeprom->magic = hw->vendor_id |
5121 ((uint32_t)hw->device_id << 16);
5123 if ((nvm->ops.read) == NULL)
5124 return -ENOTSUP;
5126 return nvm->ops.read(hw, first, length, data);
5127 }
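/*
 * Editor's note: illustrative, application-side sketch (not part of the
 * driver). Offset and length are byte based at the API level but must stay
 * word aligned, since the driver shifts them right by one to get NVM words.
 * The port id and the 64-byte window are hypothetical.
 */
static int
example_read_eeprom(uint8_t port_id, uint16_t words[32])
{
	struct rte_dev_eeprom_info info;

	memset(&info, 0, sizeof(info));
	info.offset = 0;   /* byte offset, word aligned */
	info.length = 64;  /* 64 bytes == 32 NVM words */
	info.data = words;

	return rte_eth_dev_get_eeprom(port_id, &info);
}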
5129 static int
5130 eth_igb_set_eeprom(struct rte_eth_dev *dev,
5131 struct rte_dev_eeprom_info *in_eeprom)
5133 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5134 struct e1000_nvm_info *nvm = &hw->nvm;
5135 uint16_t *data = in_eeprom->data;
5136 int first, length;
5138 first = in_eeprom->offset >> 1;
5139 length = in_eeprom->length >> 1;
5140 if ((first >= hw->nvm.word_size) ||
5141 ((first + length) >= hw->nvm.word_size))
5142 return -EINVAL;
5144 in_eeprom->magic = (uint32_t)hw->vendor_id |
5145 ((uint32_t)hw->device_id << 16);
5147 if ((nvm->ops.write) == NULL)
5148 return -ENOTSUP;
5149 return nvm->ops.write(hw, first, length, data);
5150 }
5152 static int
5153 eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
5154 {
5155 struct e1000_hw *hw =
5156 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5157 uint32_t mask = 1 << queue_id;
5159 E1000_WRITE_REG(hw, E1000_EIMC, mask);
5160 E1000_WRITE_FLUSH(hw);
5162 return 0;
5163 }
5165 static int
5166 eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
5167 {
5168 struct e1000_hw *hw =
5169 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5170 struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
5171 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5172 uint32_t mask = 1 << queue_id;
5173 uint32_t regval;
5175 regval = E1000_READ_REG(hw, E1000_EIMS);
5176 E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
5177 E1000_WRITE_FLUSH(hw);
5179 rte_intr_enable(intr_handle);
5181 return 0;
5182 }
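/*
 * Editor's note: illustrative, application-side sketch (not part of the
 * driver). A polling loop can arm the per-queue interrupt before sleeping
 * and disarm it once traffic resumes; port and queue ids are hypothetical.
 */
static void
example_rx_intr_wait(uint8_t port_id, uint16_t queue_id)
{
	/* Arm the interrupt for this queue before blocking on epoll. */
	rte_eth_dev_rx_intr_enable(port_id, queue_id);

	/* ... block on the queue's event fd, e.g. via rte_epoll_wait() ... */

	/* Disarm again while busy polling the queue. */
	rte_eth_dev_rx_intr_disable(port_id, queue_id);
}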
5184 static void
5185 eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
5186 uint8_t index, uint8_t offset)
5187 {
5188 uint32_t val = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
5190 /* clear bits */
5191 val &= ~((uint32_t)0xFF << offset);
5193 /* write vector and valid bit */
5194 val |= (msix_vector | E1000_IVAR_VALID) << offset;
5196 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, val);
5197 }
5199 static void
5200 eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
5201 uint8_t queue, uint8_t msix_vector)
5202 {
5203 uint32_t tmp = 0;
5205 if (hw->mac.type == e1000_82575) {
5206 if (direction == 0)
5207 tmp = E1000_EICR_RX_QUEUE0 << queue;
5208 else if (direction == 1)
5209 tmp = E1000_EICR_TX_QUEUE0 << queue;
5210 E1000_WRITE_REG(hw, E1000_MSIXBM(msix_vector), tmp);
5211 } else if (hw->mac.type == e1000_82576) {
5212 if ((direction == 0) || (direction == 1))
5213 eth_igb_write_ivar(hw, msix_vector, queue & 0x7,
5214 ((queue & 0x8) << 1) +
5215 8 * direction);
5216 } else if ((hw->mac.type == e1000_82580) ||
5217 (hw->mac.type == e1000_i350) ||
5218 (hw->mac.type == e1000_i354) ||
5219 (hw->mac.type == e1000_i210) ||
5220 (hw->mac.type == e1000_i211)) {
5221 if ((direction == 0) || (direction == 1))
5222 eth_igb_write_ivar(hw, msix_vector,
5223 queue >> 1,
5224 ((queue & 0x1) << 4) +
5225 8 * direction);
5226 }
5227 }
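/*
 * Editor's note: illustrative sketch (not part of the driver), spelling out
 * the IVAR indexing used above for the 82580/i350/i354/i210/i211 family:
 * each 32-bit IVAR entry covers two queues, with Rx and Tx causes in
 * separate byte lanes. The helper only computes the (index, offset) pair
 * that eth_igb_write_ivar() consumes.
 */
static inline void
example_ivar_slot(uint8_t queue, int8_t direction,
		  uint8_t *index, uint8_t *offset)
{
	*index = queue >> 1;                            /* two queues per IVAR */
	*offset = ((queue & 0x1) << 4) + 8 * direction; /* byte lane 0/8/16/24 */
}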
5229 /* Sets up the hardware to generate MSI-X interrupts properly.
5230 * @dev: board private structure
5231 */
5233 static void
5234 eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
5235 {
5236 int queue_id;
5237 uint32_t tmpval, regval, intr_mask;
5238 struct e1000_hw *hw =
5239 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5240 uint32_t vec = E1000_MISC_VEC_ID;
5241 uint32_t base = E1000_MISC_VEC_ID;
5242 uint32_t misc_shift = 0;
5243 struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
5244 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5246 /* Won't configure the MSI-X register if no mapping is done
5247 * between intr vector and event fd.
5248 */
5249 if (!rte_intr_dp_is_en(intr_handle))
5250 return;
5252 if (rte_intr_allow_others(intr_handle)) {
5253 vec = base = E1000_RX_VEC_START;
5254 misc_shift = 1;
5255 }
5257 /* set interrupt vector for other causes */
5258 if (hw->mac.type == e1000_82575) {
5259 tmpval = E1000_READ_REG(hw, E1000_CTRL_EXT);
5260 /* enable MSI-X PBA support */
5261 tmpval |= E1000_CTRL_EXT_PBA_CLR;
5263 /* Auto-Mask interrupts upon ICR read */
5264 tmpval |= E1000_CTRL_EXT_EIAME;
5265 tmpval |= E1000_CTRL_EXT_IRCA;
5267 E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmpval);
5269 /* enable msix_other interrupt */
5270 E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), 0, E1000_EIMS_OTHER);
5271 regval = E1000_READ_REG(hw, E1000_EIAC);
5272 E1000_WRITE_REG(hw, E1000_EIAC, regval | E1000_EIMS_OTHER);
5273 regval = E1000_READ_REG(hw, E1000_EIAM);
5274 E1000_WRITE_REG(hw, E1000_EIMS, regval | E1000_EIMS_OTHER);
5275 } else if ((hw->mac.type == e1000_82576) ||
5276 (hw->mac.type == e1000_82580) ||
5277 (hw->mac.type == e1000_i350) ||
5278 (hw->mac.type == e1000_i354) ||
5279 (hw->mac.type == e1000_i210) ||
5280 (hw->mac.type == e1000_i211)) {
5281 /* turn on MSI-X capability first */
5282 E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE |
5283 E1000_GPIE_PBA | E1000_GPIE_EIAME |
5284 E1000_GPIE_NSICR);
5285 intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
5286 misc_shift;
5287 regval = E1000_READ_REG(hw, E1000_EIAC);
5288 E1000_WRITE_REG(hw, E1000_EIAC, regval | intr_mask);
5290 /* enable msix_other interrupt */
5291 regval = E1000_READ_REG(hw, E1000_EIMS);
5292 E1000_WRITE_REG(hw, E1000_EIMS, regval | intr_mask);
5293 tmpval = (dev->data->nb_rx_queues | E1000_IVAR_VALID) << 8;
5294 E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmpval);
5295 }
5297 /* Use EIAM to auto-mask when an MSI-X interrupt is asserted;
5298 * this saves a register write for every interrupt.
5299 */
5300 intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
5301 misc_shift;
5302 regval = E1000_READ_REG(hw, E1000_EIAM);
5303 E1000_WRITE_REG(hw, E1000_EIAM, regval | intr_mask);
5305 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
5306 eth_igb_assign_msix_vector(hw, 0, queue_id, vec);
5307 intr_handle->intr_vec[queue_id] = vec;
5308 if (vec < base + intr_handle->nb_efd - 1)
5309 vec++;
5310 }
5312 E1000_WRITE_FLUSH(hw);
5313 }
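/*
 * Editor's note: illustrative, application-side sketch (not part of the
 * driver). eth_igb_configure_msix_intr() only programs vectors when Rx
 * interrupts were requested at configure time, so the application opts in
 * through intr_conf; the port id and queue counts are hypothetical.
 */
static int
example_configure_with_rx_intr(uint8_t port_id)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.intr_conf.rxq = 1; /* request one MSI-X vector per Rx queue */

	return rte_eth_dev_configure(port_id, 4 /* rxq */, 4 /* txq */,
				     &conf);
}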
5315 RTE_PMD_REGISTER_PCI(net_e1000_igb, rte_igb_pmd.pci_drv);
5316 RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb, pci_id_igb_map);
5317 RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb, "* igb_uio | uio_pci_generic | vfio");
5318 RTE_PMD_REGISTER_PCI(net_e1000_igb_vf, rte_igbvf_pmd.pci_drv);
5319 RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb_vf, pci_id_igbvf_map);
5320 RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb_vf, "* igb_uio | vfio");