/*
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_atomic.h>
#include <rte_malloc.h>

#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"
/*
 * Default values for port configuration
 */
#define IGB_DEFAULT_RX_FREE_THRESH  32

#define IGB_DEFAULT_RX_PTHRESH      ((hw->mac.type == e1000_i354) ? 12 : 8)
#define IGB_DEFAULT_RX_HTHRESH      8
#define IGB_DEFAULT_RX_WTHRESH      ((hw->mac.type == e1000_82576) ? 1 : 4)

#define IGB_DEFAULT_TX_PTHRESH      ((hw->mac.type == e1000_i354) ? 20 : 8)
#define IGB_DEFAULT_TX_HTHRESH      1
#define IGB_DEFAULT_TX_WTHRESH      ((hw->mac.type == e1000_82576) ? 1 : 16)

#define IGB_HKEY_MAX_INDEX 10

/* Bit shift and mask */
#define IGB_4_BIT_WIDTH  (CHAR_BIT / 2)
#define IGB_4_BIT_MASK   RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
#define IGB_8_BIT_WIDTH  CHAR_BIT
#define IGB_8_BIT_MASK   UINT8_MAX
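/*
 * Illustrative note (editorial addition, not part of the original sources):
 * RTE_LEN2MASK(n, t) yields a value of type t with the n least-significant
 * bits set, so IGB_4_BIT_MASK expands to 0x0F and IGB_8_BIT_MASK to 0xFF.
 * These widths and masks are typically used to pack and unpack 4-bit and
 * 8-bit per-queue fields (for example RSS redirection table entries) out of
 * 32-bit device registers.
 */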
/* Additional timesync values. */
#define E1000_CYCLECOUNTER_MASK      0xffffffffffffffffULL
#define E1000_ETQF_FILTER_1588       3
#define IGB_82576_TSYNC_SHIFT        16
#define E1000_INCPERIOD_82576        (1 << E1000_TIMINCA_16NS_SHIFT)
#define E1000_INCVALUE_82576         (16 << IGB_82576_TSYNC_SHIFT)
#define E1000_TSAUXC_DISABLE_SYSTIME 0x80000000

#define E1000_VTIVAR_MISC            0x01740
#define E1000_VTIVAR_MISC_MASK       0xFF
#define E1000_VTIVAR_VALID           0x80
#define E1000_VTIVAR_MISC_MAILBOX    0
#define E1000_VTIVAR_MISC_INTR_MASK  0x3

/* External VLAN Enable bit mask */
#define E1000_CTRL_EXT_EXT_VLAN      (1 << 26)

/* External VLAN Ether Type bit mask and shift */
#define E1000_VET_VET_EXT            0xFFFF0000
#define E1000_VET_VET_EXT_SHIFT      16
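/*
 * Hedged sketch (editorial assumption, not taken verbatim from this driver):
 * VET holds the inner VLAN ethertype in its low 16 bits and the outer
 * (external) ethertype in its high 16 bits, so updating the outer TPID would
 * typically look like:
 *
 *	reg = E1000_READ_REG(hw, E1000_VET);
 *	reg = (reg & ~E1000_VET_VET_EXT) |
 *	      ((uint32_t)tpid << E1000_VET_VET_EXT_SHIFT);
 *	E1000_WRITE_REG(hw, E1000_VET, reg);
 *
 * This is consistent with eth_igb_start() below, which programs VET with
 * ETHER_TYPE_VLAN in both halves.
 */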
static int eth_igb_configure(struct rte_eth_dev *dev);
static int eth_igb_start(struct rte_eth_dev *dev);
static void eth_igb_stop(struct rte_eth_dev *dev);
static int eth_igb_dev_set_link_up(struct rte_eth_dev *dev);
static int eth_igb_dev_set_link_down(struct rte_eth_dev *dev);
static void eth_igb_close(struct rte_eth_dev *dev);
static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
static void eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_igb_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static void eth_igb_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *rte_stats);
static int eth_igb_xstats_get(struct rte_eth_dev *dev,
			      struct rte_eth_xstat *xstats, unsigned n);
static int eth_igb_xstats_get_names(struct rte_eth_dev *dev,
				    struct rte_eth_xstat_name *xstats_names,
				    unsigned limit);
static void eth_igb_stats_reset(struct rte_eth_dev *dev);
static void eth_igb_xstats_reset(struct rte_eth_dev *dev);
static void eth_igb_infos_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
static const uint32_t *eth_igb_supported_ptypes_get(struct rte_eth_dev *dev);
static void eth_igbvf_infos_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);
static int  eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int  eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
static int eth_igb_interrupt_action(struct rte_eth_dev *dev);
static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
				      void *param);
static int  igb_hardware_init(struct e1000_hw *hw);
static void igb_hw_control_acquire(struct e1000_hw *hw);
static void igb_hw_control_release(struct e1000_hw *hw);
static void igb_init_manageability(struct e1000_hw *hw);
static void igb_release_manageability(struct e1000_hw *hw);

static int  eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static int eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
				 enum rte_vlan_type vlan_type,
				 uint16_t tpid);
static void eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int eth_igb_led_on(struct rte_eth_dev *dev);
static int eth_igb_led_off(struct rte_eth_dev *dev);

static void igb_intr_disable(struct e1000_hw *hw);
static int  igb_get_rx_buffer_size(struct e1000_hw *hw);
static void eth_igb_rar_set(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr,
		uint32_t index, uint32_t pool);
static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
static void eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
		struct ether_addr *addr);

static void igbvf_intr_disable(struct e1000_hw *hw);
static int igbvf_dev_configure(struct rte_eth_dev *dev);
static int igbvf_dev_start(struct rte_eth_dev *dev);
static void igbvf_dev_stop(struct rte_eth_dev *dev);
static void igbvf_dev_close(struct rte_eth_dev *dev);
static void igbvf_promiscuous_enable(struct rte_eth_dev *dev);
static void igbvf_promiscuous_disable(struct rte_eth_dev *dev);
static void igbvf_allmulticast_enable(struct rte_eth_dev *dev);
static void igbvf_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_igbvf_link_update(struct e1000_hw *hw);
static void eth_igbvf_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *rte_stats);
static int eth_igbvf_xstats_get(struct rte_eth_dev *dev,
				struct rte_eth_xstat *xstats, unsigned n);
static int eth_igbvf_xstats_get_names(struct rte_eth_dev *dev,
				      struct rte_eth_xstat_name *xstats_names,
				      unsigned limit);
static void eth_igbvf_stats_reset(struct rte_eth_dev *dev);
static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static void igbvf_default_mac_addr_set(struct rte_eth_dev *dev,
		struct ether_addr *addr);
static int igbvf_get_reg_length(struct rte_eth_dev *dev);
static int igbvf_get_regs(struct rte_eth_dev *dev,
		struct rte_dev_reg_info *regs);

static int eth_igb_rss_reta_update(struct rte_eth_dev *dev,
				   struct rte_eth_rss_reta_entry64 *reta_conf,
				   uint16_t reta_size);
static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
				  struct rte_eth_rss_reta_entry64 *reta_conf,
				  uint16_t reta_size);

static int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
			struct rte_eth_syn_filter *filter,
			bool add);
static int eth_igb_syn_filter_get(struct rte_eth_dev *dev,
			struct rte_eth_syn_filter *filter);
static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int igb_add_2tuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_2tuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
			struct rte_eth_flex_filter *filter,
			bool add);
static int eth_igb_get_flex_filter(struct rte_eth_dev *dev,
			struct rte_eth_flex_filter *filter);
static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *filter,
			bool add);
static int igb_get_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *filter);
static int igb_ntuple_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter,
			bool add);
static int igb_ethertype_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int igb_get_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter);
static int eth_igb_filter_ctrl(struct rte_eth_dev *dev,
		     enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op,
		     void *arg);
static int eth_igb_get_reg_length(struct rte_eth_dev *dev);
static int eth_igb_get_regs(struct rte_eth_dev *dev,
		struct rte_dev_reg_info *regs);
static int eth_igb_get_eeprom_length(struct rte_eth_dev *dev);
static int eth_igb_get_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *eeprom);
static int eth_igb_set_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *eeprom);
static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
				    struct ether_addr *mc_addr_set,
				    uint32_t nb_mc_addr);
static int igb_timesync_enable(struct rte_eth_dev *dev);
static int igb_timesync_disable(struct rte_eth_dev *dev);
static int igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
					  struct timespec *timestamp,
					  uint32_t flags);
static int igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					  struct timespec *timestamp);
static int igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
static int igb_timesync_read_time(struct rte_eth_dev *dev,
				  struct timespec *timestamp);
static int igb_timesync_write_time(struct rte_eth_dev *dev,
				   const struct timespec *timestamp);
static int eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev,
					uint16_t queue_id);
static int eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev,
					 uint16_t queue_id);
static void eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
				       uint8_t queue, uint8_t msix_vector);
static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
			       uint8_t index, uint8_t offset);
static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);
static void eth_igbvf_interrupt_handler(struct rte_intr_handle *handle,
					void *param);
static void igbvf_mbx_process(struct rte_eth_dev *dev);
/*
 * Define VF Stats MACRO for Non "cleared on read" register
 */
#define UPDATE_VF_STAT(reg, last, cur)            \
{                                                 \
	u32 latest = E1000_READ_REG(hw, reg);     \
	cur += (latest - last) & UINT_MAX;        \
	last = latest;                            \
}
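/*
 * Illustrative note (editorial addition, not from the original sources):
 * because the VF counter registers are not cleared on read, UPDATE_VF_STAT
 * accumulates only the delta since the previous read, and the "& UINT_MAX"
 * keeps the subtraction correct across 32-bit wrap-around. For example,
 * last = 0xFFFFFFF0 and latest = 0x00000010 gives
 * (0x10 - 0xFFFFFFF0) & 0xFFFFFFFF = 0x20, i.e. 32 new packets.
 */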
#define IGB_FC_PAUSE_TIME 0x0680
#define IGB_LINK_UPDATE_CHECK_TIMEOUT  90  /* 9s */
#define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */

#define IGBVF_PMD_NAME "rte_igbvf_pmd"     /* PMD name */

static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_igb_map[] = {

#define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

};

/*
 * The set of PCI devices this driver supports (for 82576&I350 VF)
 */
static const struct rte_pci_id pci_id_igbvf_map[] = {

#define RTE_PCI_DEV_ID_DECL_IGBVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

};
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = E1000_MAX_RING_DESC,
	.nb_min = E1000_MIN_RING_DESC,
	.nb_align = IGB_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = E1000_MAX_RING_DESC,
	.nb_min = E1000_MIN_RING_DESC,
	.nb_align = IGB_RXD_ALIGN,
};
static const struct eth_dev_ops eth_igb_ops = {
	.dev_configure = eth_igb_configure,
	.dev_start = eth_igb_start,
	.dev_stop = eth_igb_stop,
	.dev_set_link_up = eth_igb_dev_set_link_up,
	.dev_set_link_down = eth_igb_dev_set_link_down,
	.dev_close = eth_igb_close,
	.promiscuous_enable = eth_igb_promiscuous_enable,
	.promiscuous_disable = eth_igb_promiscuous_disable,
	.allmulticast_enable = eth_igb_allmulticast_enable,
	.allmulticast_disable = eth_igb_allmulticast_disable,
	.link_update = eth_igb_link_update,
	.stats_get = eth_igb_stats_get,
	.xstats_get = eth_igb_xstats_get,
	.xstats_get_names = eth_igb_xstats_get_names,
	.stats_reset = eth_igb_stats_reset,
	.xstats_reset = eth_igb_xstats_reset,
	.dev_infos_get = eth_igb_infos_get,
	.dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
	.mtu_set = eth_igb_mtu_set,
	.vlan_filter_set = eth_igb_vlan_filter_set,
	.vlan_tpid_set = eth_igb_vlan_tpid_set,
	.vlan_offload_set = eth_igb_vlan_offload_set,
	.rx_queue_setup = eth_igb_rx_queue_setup,
	.rx_queue_intr_enable = eth_igb_rx_queue_intr_enable,
	.rx_queue_intr_disable = eth_igb_rx_queue_intr_disable,
	.rx_queue_release = eth_igb_rx_queue_release,
	.rx_queue_count = eth_igb_rx_queue_count,
	.rx_descriptor_done = eth_igb_rx_descriptor_done,
	.tx_queue_setup = eth_igb_tx_queue_setup,
	.tx_queue_release = eth_igb_tx_queue_release,
	.dev_led_on = eth_igb_led_on,
	.dev_led_off = eth_igb_led_off,
	.flow_ctrl_get = eth_igb_flow_ctrl_get,
	.flow_ctrl_set = eth_igb_flow_ctrl_set,
	.mac_addr_add = eth_igb_rar_set,
	.mac_addr_remove = eth_igb_rar_clear,
	.mac_addr_set = eth_igb_default_mac_addr_set,
	.reta_update = eth_igb_rss_reta_update,
	.reta_query = eth_igb_rss_reta_query,
	.rss_hash_update = eth_igb_rss_hash_update,
	.rss_hash_conf_get = eth_igb_rss_hash_conf_get,
	.filter_ctrl = eth_igb_filter_ctrl,
	.set_mc_addr_list = eth_igb_set_mc_addr_list,
	.rxq_info_get = igb_rxq_info_get,
	.txq_info_get = igb_txq_info_get,
	.timesync_enable = igb_timesync_enable,
	.timesync_disable = igb_timesync_disable,
	.timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = igb_timesync_read_tx_timestamp,
	.get_reg_length = eth_igb_get_reg_length,
	.get_reg = eth_igb_get_regs,
	.get_eeprom_length = eth_igb_get_eeprom_length,
	.get_eeprom = eth_igb_get_eeprom,
	.set_eeprom = eth_igb_set_eeprom,
	.timesync_adjust_time = igb_timesync_adjust_time,
	.timesync_read_time = igb_timesync_read_time,
	.timesync_write_time = igb_timesync_write_time,
};
/*
 * dev_ops for virtual function; only the bare necessities for basic VF
 * operation are implemented.
 */
static const struct eth_dev_ops igbvf_eth_dev_ops = {
	.dev_configure = igbvf_dev_configure,
	.dev_start = igbvf_dev_start,
	.dev_stop = igbvf_dev_stop,
	.dev_close = igbvf_dev_close,
	.promiscuous_enable = igbvf_promiscuous_enable,
	.promiscuous_disable = igbvf_promiscuous_disable,
	.allmulticast_enable = igbvf_allmulticast_enable,
	.allmulticast_disable = igbvf_allmulticast_disable,
	.link_update = eth_igb_link_update,
	.stats_get = eth_igbvf_stats_get,
	.xstats_get = eth_igbvf_xstats_get,
	.xstats_get_names = eth_igbvf_xstats_get_names,
	.stats_reset = eth_igbvf_stats_reset,
	.xstats_reset = eth_igbvf_stats_reset,
	.vlan_filter_set = igbvf_vlan_filter_set,
	.dev_infos_get = eth_igbvf_infos_get,
	.dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
	.rx_queue_setup = eth_igb_rx_queue_setup,
	.rx_queue_release = eth_igb_rx_queue_release,
	.tx_queue_setup = eth_igb_tx_queue_setup,
	.tx_queue_release = eth_igb_tx_queue_release,
	.set_mc_addr_list = eth_igb_set_mc_addr_list,
	.rxq_info_get = igb_rxq_info_get,
	.txq_info_get = igb_txq_info_get,
	.mac_addr_set = igbvf_default_mac_addr_set,
	.get_reg_length = igbvf_get_reg_length,
	.get_reg = igbvf_get_regs,
};
/* store statistics names and their offsets in the stats structure */
struct rte_igb_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};
static const struct rte_igb_xstats_name_off rte_igb_stats_strings[] = {
	{"rx_crc_errors", offsetof(struct e1000_hw_stats, crcerrs)},
	{"rx_align_errors", offsetof(struct e1000_hw_stats, algnerrc)},
	{"rx_symbol_errors", offsetof(struct e1000_hw_stats, symerrs)},
	{"rx_missed_packets", offsetof(struct e1000_hw_stats, mpc)},
	{"tx_single_collision_packets", offsetof(struct e1000_hw_stats, scc)},
	{"tx_multiple_collision_packets", offsetof(struct e1000_hw_stats, mcc)},
	{"tx_excessive_collision_packets", offsetof(struct e1000_hw_stats,
		ecol)},
	{"tx_late_collisions", offsetof(struct e1000_hw_stats, latecol)},
	{"tx_total_collisions", offsetof(struct e1000_hw_stats, colc)},
	{"tx_deferred_packets", offsetof(struct e1000_hw_stats, dc)},
	{"tx_no_carrier_sense_packets", offsetof(struct e1000_hw_stats, tncrs)},
	{"rx_carrier_ext_errors", offsetof(struct e1000_hw_stats, cexterr)},
	{"rx_length_errors", offsetof(struct e1000_hw_stats, rlec)},
	{"rx_xon_packets", offsetof(struct e1000_hw_stats, xonrxc)},
	{"tx_xon_packets", offsetof(struct e1000_hw_stats, xontxc)},
	{"rx_xoff_packets", offsetof(struct e1000_hw_stats, xoffrxc)},
	{"tx_xoff_packets", offsetof(struct e1000_hw_stats, xofftxc)},
	{"rx_flow_control_unsupported_packets", offsetof(struct e1000_hw_stats,
		fcruc)},
	{"rx_size_64_packets", offsetof(struct e1000_hw_stats, prc64)},
	{"rx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, prc127)},
	{"rx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, prc255)},
	{"rx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, prc511)},
	{"rx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats,
		prc1023)},
	{"rx_size_1024_to_max_packets", offsetof(struct e1000_hw_stats,
		prc1522)},
	{"rx_broadcast_packets", offsetof(struct e1000_hw_stats, bprc)},
	{"rx_multicast_packets", offsetof(struct e1000_hw_stats, mprc)},
	{"rx_undersize_errors", offsetof(struct e1000_hw_stats, ruc)},
	{"rx_fragment_errors", offsetof(struct e1000_hw_stats, rfc)},
	{"rx_oversize_errors", offsetof(struct e1000_hw_stats, roc)},
	{"rx_jabber_errors", offsetof(struct e1000_hw_stats, rjc)},
	{"rx_management_packets", offsetof(struct e1000_hw_stats, mgprc)},
	{"rx_management_dropped", offsetof(struct e1000_hw_stats, mgpdc)},
	{"tx_management_packets", offsetof(struct e1000_hw_stats, mgptc)},
	{"rx_total_packets", offsetof(struct e1000_hw_stats, tpr)},
	{"tx_total_packets", offsetof(struct e1000_hw_stats, tpt)},
	{"rx_total_bytes", offsetof(struct e1000_hw_stats, tor)},
	{"tx_total_bytes", offsetof(struct e1000_hw_stats, tot)},
	{"tx_size_64_packets", offsetof(struct e1000_hw_stats, ptc64)},
	{"tx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, ptc127)},
	{"tx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, ptc255)},
	{"tx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, ptc511)},
	{"tx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats,
		ptc1023)},
	{"tx_size_1023_to_max_packets", offsetof(struct e1000_hw_stats,
		ptc1522)},
	{"tx_multicast_packets", offsetof(struct e1000_hw_stats, mptc)},
	{"tx_broadcast_packets", offsetof(struct e1000_hw_stats, bptc)},
	{"tx_tso_packets", offsetof(struct e1000_hw_stats, tsctc)},
	{"tx_tso_errors", offsetof(struct e1000_hw_stats, tsctfc)},
	{"rx_sent_to_host_packets", offsetof(struct e1000_hw_stats, rpthc)},
	{"tx_sent_by_host_packets", offsetof(struct e1000_hw_stats, hgptc)},
	{"rx_code_violation_packets", offsetof(struct e1000_hw_stats, scvpc)},

	{"interrupt_assert_count", offsetof(struct e1000_hw_stats, iac)},
};

#define IGB_NB_XSTATS (sizeof(rte_igb_stats_strings) / \
		sizeof(rte_igb_stats_strings[0]))
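/*
 * Illustrative note (editorial addition, not from the original sources):
 * each entry above pairs a display name with the byte offset of the
 * corresponding uint64_t counter inside struct e1000_hw_stats, so
 * eth_igb_xstats_get() further down can fetch any counter generically,
 * roughly as:
 *
 *	value = *(uint64_t *)((char *)hw_stats +
 *			      rte_igb_stats_strings[i].offset);
 */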
static const struct rte_igb_xstats_name_off rte_igbvf_stats_strings[] = {
	{"rx_multicast_packets", offsetof(struct e1000_vf_stats, mprc)},
	{"rx_good_loopback_packets", offsetof(struct e1000_vf_stats, gprlbc)},
	{"tx_good_loopback_packets", offsetof(struct e1000_vf_stats, gptlbc)},
	{"rx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gorlbc)},
	{"tx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gotlbc)},
};

#define IGBVF_NB_XSTATS (sizeof(rte_igbvf_stats_strings) / \
		sizeof(rte_igbvf_stats_strings[0]))
/**
 * Atomically reads the link status information from global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to read from.
 *   - Pointer to the buffer to be saved with the link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_igb_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
/**
 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to write to.
 *   - Pointer to the buffer holding the link status to be written.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
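/*
 * Illustrative note (editorial addition, not from the original sources):
 * both helpers above rely on struct rte_eth_link fitting in 64 bits, so a
 * single rte_atomic64_cmpset() can copy the whole link status. The compare
 * value is the destination's current contents, which means the swap only
 * fails if another thread updated the link status between the read and the
 * compare-and-set; in that case the helpers report failure rather than
 * retrying.
 */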
static void
igb_intr_enable(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
	E1000_WRITE_FLUSH(hw);
}

static void
igb_intr_disable(struct e1000_hw *hw)
{
	E1000_WRITE_REG(hw, E1000_IMC, ~0);
	E1000_WRITE_FLUSH(hw);
}

static void
igbvf_intr_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* only for mailbox */
	E1000_WRITE_REG(hw, E1000_EIAM, 1 << E1000_VTIVAR_MISC_MAILBOX);
	E1000_WRITE_REG(hw, E1000_EIAC, 1 << E1000_VTIVAR_MISC_MAILBOX);
	E1000_WRITE_REG(hw, E1000_EIMS, 1 << E1000_VTIVAR_MISC_MAILBOX);
	E1000_WRITE_FLUSH(hw);
}
/* only for mailbox now; if RX/TX interrupts are needed, extend this function. */
static void
igbvf_set_ivar_map(struct e1000_hw *hw, uint8_t msix_vector)
{
	uint32_t tmp = 0;

	tmp |= (msix_vector & E1000_VTIVAR_MISC_INTR_MASK);
	tmp |= E1000_VTIVAR_VALID;
	E1000_WRITE_REG(hw, E1000_VTIVAR_MISC, tmp);
}
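/*
 * Illustrative example (editorial assumption): for the mailbox vector the
 * value composed above is (E1000_VTIVAR_MISC_MAILBOX &
 * E1000_VTIVAR_MISC_INTR_MASK) | E1000_VTIVAR_VALID, i.e. (0 & 0x3) | 0x80
 * = 0x80, which maps the "other cause" (mailbox) interrupt to MSI-X vector 0
 * and marks the IVAR entry valid.
 */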
static void
eth_igbvf_configure_msix_intr(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Configure VF other cause ivar */
	igbvf_set_ivar_map(hw, E1000_VTIVAR_MISC_MAILBOX);
}
static inline int32_t
igb_pf_reset_hw(struct e1000_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = e1000_reset_hw(hw);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	return status;
}
static void
igb_identify_hardware(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw->vendor_id = dev->pci_dev->id.vendor_id;
	hw->device_id = dev->pci_dev->id.device_id;
	hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;

	e1000_set_mac_type(hw);

	/* need to check if it is a vf device below */
}
igb_reset_swfw_lock(struct e1000_hw *hw)

	/*
	 * Do mac ops initialization manually here, since we will need
	 * some function pointers set by this call.
	 */
	ret_val = e1000_init_mac_params(hw);

	/*
	 * SMBI lock should not fail in this early stage. If this is the case,
	 * it is due to an improper exit of the application.
	 * So force the release of the faulty lock.
	 */
	if (e1000_get_hw_semaphore_generic(hw) < 0) {
		PMD_DRV_LOG(DEBUG, "SMBI lock released");

	e1000_put_hw_semaphore_generic(hw);

	if (hw->mac.ops.acquire_swfw_sync != NULL) {

		/*
		 * Phy lock should not fail in this early stage. If this is
		 * the case, it is due to an improper exit of the application.
		 * So force the release of the faulty lock.
		 */
		mask = E1000_SWFW_PHY0_SM << hw->bus.func;
		if (hw->bus.func > E1000_FUNC_1)

		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",

		hw->mac.ops.release_swfw_sync(hw, mask);

		/*
		 * This one is more tricky since it is common to all ports; but
		 * swfw_sync retries last long enough (1s) to be almost sure
		 * that if lock can not be taken it is due to an improper lock
		 * of the
		 */
		mask = E1000_SWFW_EEP_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW common locks released");

		hw->mac.ops.release_swfw_sync(hw, mask);

	return E1000_SUCCESS;
eth_igb_dev_init(struct rte_eth_dev *eth_dev)

	struct rte_pci_device *pci_dev;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);

	pci_dev = eth_dev->pci_dev;

	eth_dev->dev_ops = &eth_igb_ops;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	igb_identify_hardware(eth_dev);
	if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {

	e1000_get_bus_info(hw);

	/* Reset any pending lock */
	if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) {

	/* Finish initialization */
	if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {

	hw->phy.autoneg_wait_to_complete = 0;
	hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;

	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = 0; /* AUTO_ALL_MODES */
		hw->phy.disable_polarity_correction = 0;
		hw->phy.ms_type = e1000_ms_hw_default;

	/*
	 * Start from a known state, this is important in reading the nvm
	 */

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time it's a real issue.
		 */
		if (e1000_validate_nvm_checksum(hw) < 0) {
			PMD_INIT_LOG(ERR, "EEPROM checksum invalid");

	/* Read the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(hw) != 0) {
		PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("e1000",
		ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
			     "store MAC addresses",
			     ETHER_ADDR_LEN * hw->mac.rar_entry_count);

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)hw->mac.addr,
			&eth_dev->data->mac_addrs[0]);

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* Now initialize the hardware */
	if (igb_hardware_init(hw) != 0) {
		PMD_INIT_LOG(ERR, "Hardware initialization failed");
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;

	hw->mac.get_link_status = 1;
	adapter->stopped = 0;

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(hw) < 0) {
		PMD_INIT_LOG(ERR, "PHY reset is blocked due to"

	/* initialize PF if max_vfs not zero */
	igb_pf_host_init(eth_dev);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(&pci_dev->intr_handle,
				   eth_igb_interrupt_handler,

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pci_dev->intr_handle);

	/* enable support intr */
	igb_intr_enable(eth_dev);

	TAILQ_INIT(&filter_info->flex_list);
	filter_info->flex_mask = 0;
	TAILQ_INIT(&filter_info->twotuple_list);
	filter_info->twotuple_mask = 0;
	TAILQ_INIT(&filter_info->fivetuple_list);
	filter_info->fivetuple_mask = 0;

	igb_hw_control_release(hw);
eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)

	struct rte_pci_device *pci_dev;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)

	hw = E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	pci_dev = eth_dev->pci_dev;

	if (adapter->stopped == 0)
		eth_igb_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* Reset any pending lock */
	igb_reset_swfw_lock(hw);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	/* uninitialize PF if max_vfs not zero */
	igb_pf_host_uninit(eth_dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(&(pci_dev->intr_handle));
	rte_intr_callback_unregister(&(pci_dev->intr_handle),
				     eth_igb_interrupt_handler, (void *)eth_dev);
/*
 * Virtual Function device init
 */
eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)

	struct rte_pci_device *pci_dev;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ether_addr *perm_addr = (struct ether_addr *)hw->mac.perm_addr;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &igbvf_eth_dev_ops;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
		return 0;
	}

	pci_dev = eth_dev->pci_dev;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	adapter->stopped = 0;

	/* Initialize the shared code (base driver) */
	diag = e1000_setup_init_funcs(hw, TRUE);
		PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d",

	/* init_mailbox_params */
	hw->mbx.ops.init_params(hw);

	/* Disable the interrupts for VF */
	igbvf_intr_disable(hw);

	diag = hw->mac.ops.reset_hw(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN *
		hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
			"Failed to allocate %d bytes needed to store MAC "
			ETHER_ADDR_LEN * hw->mac.rar_entry_count);

	/* Generate a random MAC address, if none was assigned by PF. */
	if (is_zero_ether_addr(perm_addr)) {
		eth_random_addr(perm_addr->addr_bytes);
		diag = e1000_rar_set(hw, perm_addr->addr_bytes, 0);
			rte_free(eth_dev->data->mac_addrs);
			eth_dev->data->mac_addrs = NULL;

		PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
		PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
			     "%02x:%02x:%02x:%02x:%02x:%02x",
			     perm_addr->addr_bytes[0],
			     perm_addr->addr_bytes[1],
			     perm_addr->addr_bytes[2],
			     perm_addr->addr_bytes[3],
			     perm_addr->addr_bytes[4],
			     perm_addr->addr_bytes[5]);

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x "
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id, "igb_mac_82576_vf");

	rte_intr_callback_register(&pci_dev->intr_handle,
				   eth_igbvf_interrupt_handler,
eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)

	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
	struct rte_pci_device *pci_dev = eth_dev->pci_dev;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)

	if (adapter->stopped == 0)
		igbvf_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(&pci_dev->intr_handle);
	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     eth_igbvf_interrupt_handler,
static struct eth_driver rte_igb_pmd = {
	.pci_drv = {
		.name = "rte_igb_pmd",
		.id_table = pci_id_igb_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
			RTE_PCI_DRV_DETACHABLE,
	},
	.eth_dev_init = eth_igb_dev_init,
	.eth_dev_uninit = eth_igb_dev_uninit,
	.dev_private_size = sizeof(struct e1000_adapter),
};

/*
 * virtual function driver struct
 */
static struct eth_driver rte_igbvf_pmd = {
	.pci_drv = {
		.name = "rte_igbvf_pmd",
		.id_table = pci_id_igbvf_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
	},
	.eth_dev_init = eth_igbvf_dev_init,
	.eth_dev_uninit = eth_igbvf_dev_uninit,
	.dev_private_size = sizeof(struct e1000_adapter),
};
static int
rte_igb_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
{
	rte_eth_driver_register(&rte_igb_pmd);
	return 0;
}

static void
igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* RCTL: enable VLAN filter since VMDq always use VLAN filter */
	uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);

	rctl |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

/*
 * VF Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Virtual Poll Mode] Driver of PCI IGB devices.
 */
static int
rte_igbvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	rte_eth_driver_register(&rte_igbvf_pmd);
	return 0;
}
igb_check_mq_mode(struct rte_eth_dev *dev)
{
	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
	uint16_t nb_rx_q = dev->data->nb_rx_queues;
	uint16_t nb_tx_q = dev->data->nb_tx_queues;

	if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) ||
	    tx_mq_mode == ETH_MQ_TX_DCB ||
	    tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) {
		PMD_INIT_LOG(ERR, "DCB mode is not supported.");
		return -EINVAL;
	}
	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		/* Check multi-queue mode.
		 * To not break software, we accept ETH_MQ_RX_NONE as this
		 * might be used to turn off VLAN filter.
		 */

		if (rx_mq_mode == ETH_MQ_RX_NONE ||
		    rx_mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
			RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
		} else {
			/* Only support one queue on VFs.
			 * RSS together with SRIOV is not supported.
			 */
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" wrong mq_mode rx %d.",
					rx_mq_mode);
			return -EINVAL;
		}
		/* TX mode is not used here, so mode might be ignored.*/
		if (tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
			/* SRIOV only works in VMDq enable mode */
			PMD_INIT_LOG(WARNING, "SRIOV is active,"
					" TX mode %d is not supported. "
					" Driver will behave as %d mode.",
					tx_mq_mode, ETH_MQ_TX_VMDQ_ONLY);
		}

		/* check valid queue number */
		if ((nb_rx_q > 1) || (nb_tx_q > 1)) {
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" only support one queue on VFs.");
			return -EINVAL;
		}
	} else {
		/* To not break software that sets an invalid mode, only
		 * display a warning if an invalid mode is used.
		 */
		if (rx_mq_mode != ETH_MQ_RX_NONE &&
		    rx_mq_mode != ETH_MQ_RX_VMDQ_ONLY &&
		    rx_mq_mode != ETH_MQ_RX_RSS) {
			/* RSS together with VMDq not supported*/
			PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
				     rx_mq_mode);
			return -EINVAL;
		}

		if (tx_mq_mode != ETH_MQ_TX_NONE &&
		    tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
			PMD_INIT_LOG(WARNING, "TX mode %d is not supported."
					" Since txmode is meaningless in this"
					" driver, it is just ignored.",
					tx_mq_mode);
		}
	}
	return 0;
}
eth_igb_configure(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* multiple queue mode checking */
	ret = igb_check_mq_mode(dev);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "igb_check_mq_mode fails with %d.",
			    ret);
		return ret;
	}

	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
	PMD_INIT_FUNC_TRACE();

	return 0;
}
eth_igb_start(struct rte_eth_dev *dev)

	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);
	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
	uint32_t intr_vector = 0;

	PMD_INIT_FUNC_TRACE();

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* Power up the phy. Needed to make the link go Up */
	eth_igb_dev_set_link_up(dev);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 */
	if (hw->mac.type == e1000_82575) {

		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		E1000_WRITE_REG(hw, E1000_PBA, pba);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(hw, hw->mac.addr, 0);

	/* Initialize the hardware */
	if (igb_hardware_init(hw)) {
		PMD_INIT_LOG(ERR, "Unable to initialize the hardware");

	adapter->stopped = 0;

	E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	/* configure PF module if SRIOV enabled */
	igb_pf_host_configure(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec\n", dev->data->nb_rx_queues);
	/* configure msix for rx interrupt */
	eth_igb_configure_msix_intr(dev);

	/* Configure for OS presence */
	igb_init_manageability(hw);

	eth_igb_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	ret = eth_igb_rx_init(dev);
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		igb_dev_clear_queues(dev);

	e1000_clear_hw_cntrs_base_generic(hw);

	/*
	 * VLAN Offload Settings
	 */
	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
		ETH_VLAN_EXTEND_MASK;
	eth_igb_vlan_offload_set(dev, mask);

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
		/* Enable VLAN filter since VMDq always use VLAN filter */
		igb_vmdq_vlan_hw_filter_enable(dev);

	if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
	    (hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210) ||
	    (hw->mac.type == e1000_i211)) {
		/* Configure EITR with the maximum possible value (0xFFFF) */
		E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);

	/* Setup link speed and duplex */
	speeds = &dev->data->dev_conf.link_speeds;
	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
		hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;

		autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;

		hw->phy.autoneg_advertised = 0;

		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
			goto error_invalid_config;

		if (*speeds & ETH_LINK_SPEED_10M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;

		if (*speeds & ETH_LINK_SPEED_10M) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;

		if (*speeds & ETH_LINK_SPEED_100M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;

		if (*speeds & ETH_LINK_SPEED_100M) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;

		if (*speeds & ETH_LINK_SPEED_1G) {
			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;

		if (num_speeds == 0 || (!autoneg && (num_speeds > 1)))
			goto error_invalid_config;

	e1000_setup_link(hw);

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			eth_igb_lsc_interrupt_setup(dev);

		rte_intr_callback_unregister(intr_handle,
					     eth_igb_interrupt_handler,

		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex\n");

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		eth_igb_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	igb_intr_enable(dev);

	PMD_INIT_LOG(DEBUG, "<<");

error_invalid_config:
	PMD_INIT_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
		     dev->data->dev_conf.link_speeds, dev->data->port_id);
	igb_dev_clear_queues(dev);
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 *
 **********************************************************************/
eth_igb_stop(struct rte_eth_dev *dev)

	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct rte_eth_link link;
	struct e1000_flex_filter *p_flex;
	struct e1000_5tuple_filter *p_5tuple, *p_5tuple_next;
	struct e1000_2tuple_filter *p_2tuple, *p_2tuple_next;
	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;

	igb_intr_disable(hw);

	/* disable intr eventfd mapping */
	rte_intr_disable(intr_handle);

	igb_pf_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	/* Set bit for Go Link disconnect */
	if (hw->mac.type >= e1000_82580) {

		phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
		phpm_reg |= E1000_82580_PM_GO_LINKD;
		E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);

	/* Power down the phy. Needed to make the link go Down */
	eth_igb_dev_set_link_down(dev);

	igb_dev_clear_queues(dev);

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_igb_dev_atomic_write_link_status(dev, &link);

	/* Remove all flex filters of the device */
	while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) {
		TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries);

	filter_info->flex_mask = 0;

	/* Remove all ntuple filters of the device */
	for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
	     p_5tuple != NULL; p_5tuple = p_5tuple_next) {
		p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
		TAILQ_REMOVE(&filter_info->fivetuple_list,

	filter_info->fivetuple_mask = 0;
	for (p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list);
	     p_2tuple != NULL; p_2tuple = p_2tuple_next) {
		p_2tuple_next = TAILQ_NEXT(p_2tuple, entries);
		TAILQ_REMOVE(&filter_info->twotuple_list,

	filter_info->twotuple_mask = 0;

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   eth_igb_interrupt_handler,

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
static int
eth_igb_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->phy.media_type == e1000_media_type_copper)
		e1000_power_up_phy(hw);
	else
		e1000_power_up_fiber_serdes_link(hw);

	return 0;
}

static int
eth_igb_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->phy.media_type == e1000_media_type_copper)
		e1000_power_down_phy(hw);
	else
		e1000_shutdown_fiber_serdes_link(hw);

	return 0;
}
eth_igb_close(struct rte_eth_dev *dev)

	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);
	struct rte_eth_link link;
	struct rte_pci_device *pci_dev;

	adapter->stopped = 1;

	e1000_phy_hw_reset(hw);
	igb_release_manageability(hw);
	igb_hw_control_release(hw);

	/* Clear bit for Go Link disconnect */
	if (hw->mac.type >= e1000_82580) {

		phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
		phpm_reg &= ~E1000_82580_PM_GO_LINKD;
		E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);

	igb_dev_free_queues(dev);

	pci_dev = dev->pci_dev;
	if (pci_dev->intr_handle.intr_vec) {
		rte_free(pci_dev->intr_handle.intr_vec);
		pci_dev->intr_handle.intr_vec = NULL;

	memset(&link, 0, sizeof(link));
	rte_igb_dev_atomic_write_link_status(dev, &link);
static int
igb_get_rx_buffer_size(struct e1000_hw *hw)
{
	uint32_t rx_buf_size;

	if (hw->mac.type == e1000_82576) {
		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10;
	} else if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i350) {
		/* PBS needs to be translated according to a lookup table */
		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf);
		rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size);
		rx_buf_size = (rx_buf_size << 10);
	} else if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10;
	} else {
		rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10;
	}

	return rx_buf_size;
}
/*********************************************************************
 *
 *  Initialize the hardware
 *
 **********************************************************************/
static int
igb_hardware_init(struct e1000_hw *hw)
{
	uint32_t rx_buf_size;
	int diag;

	/* Let the firmware know the OS is in control */
	igb_hw_control_acquire(hw);

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two standard size (1518)
	 *   frames to be received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   size.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	rx_buf_size = igb_get_rx_buffer_size(hw);

	hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2);
	hw->fc.low_water = hw->fc.high_water - 1500;
	hw->fc.pause_time = IGB_FC_PAUSE_TIME;
	hw->fc.send_xon = 1;
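	/*
	 * Worked example (editorial addition; the buffer size is an
	 * assumption, only the arithmetic is taken from the code above):
	 * with a 64 KB Rx packet buffer, rx_buf_size = 0x10000, so
	 * high_water = 65536 - 2 * 1518 = 62500 bytes and
	 * low_water = 62500 - 1500 = 61000 bytes. XOFF is sent when less than
	 * roughly two full-size frames of space remain, and XON resumes
	 * traffic once about one frame has drained.
	 */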
	/* Set Flow control, use the tunable location if sane */
	if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4))
		hw->fc.requested_mode = igb_fc_setting;
	else
		hw->fc.requested_mode = e1000_fc_none;

	/* Issue a global reset */
	igb_pf_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	diag = e1000_init_hw(hw);
	if (diag < 0)
		return diag;

	E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
	e1000_get_phy_info(hw);
	e1000_check_for_link(hw);

	return 0;
}
/* This function is based on igb_update_stats_counters() in igb/if_igb.c */
static void
igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats)
{
	uint64_t old_gprc  = stats->gprc;
	uint64_t old_gptc  = stats->gptc;
	uint64_t old_tpr   = stats->tpr;
	uint64_t old_tpt   = stats->tpt;
	uint64_t old_rpthc = stats->rpthc;
	uint64_t old_hgptc = stats->hgptc;

	if (hw->phy.media_type == e1000_media_type_copper ||
	    (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		stats->symerrs +=
		    E1000_READ_REG(hw, E1000_SYMERRS);
		stats->sec += E1000_READ_REG(hw, E1000_SEC);
	}

	stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
	stats->mpc += E1000_READ_REG(hw, E1000_MPC);
	stats->scc += E1000_READ_REG(hw, E1000_SCC);
	stats->ecol += E1000_READ_REG(hw, E1000_ECOL);

	stats->mcc += E1000_READ_REG(hw, E1000_MCC);
	stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
	stats->colc += E1000_READ_REG(hw, E1000_COLC);
	stats->dc += E1000_READ_REG(hw, E1000_DC);
	stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
	stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
	stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);

	/*
	 * For watchdog management we need to know if we have been
	 * paused during the last interval, so capture that here.
	 */
	pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
	stats->xoffrxc += pause_frames;
	stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
	stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
	stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
	stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
	stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
	stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
	stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
	stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
	stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
	stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
	stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
	stats->gptc += E1000_READ_REG(hw, E1000_GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	/* Workaround CRC bytes included in size, take away 4 bytes/packet */
	stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
	stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
	stats->gorc -= (stats->gprc - old_gprc) * ETHER_CRC_LEN;
	stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
	stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);
	stats->gotc -= (stats->gptc - old_gptc) * ETHER_CRC_LEN;
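	/*
	 * Illustrative note (editorial addition, not from the original
	 * sources): the hardware byte counters include the 4-byte Ethernet
	 * CRC, so the adjustments above subtract ETHER_CRC_LEN for every good
	 * packet counted in the same interval; e.g. 1000 new packets mean
	 * 4000 bytes are removed from the octet totals.
	 */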
	stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
	stats->ruc += E1000_READ_REG(hw, E1000_RUC);
	stats->rfc += E1000_READ_REG(hw, E1000_RFC);
	stats->roc += E1000_READ_REG(hw, E1000_ROC);
	stats->rjc += E1000_READ_REG(hw, E1000_RJC);

	stats->tpr += E1000_READ_REG(hw, E1000_TPR);
	stats->tpt += E1000_READ_REG(hw, E1000_TPT);

	stats->tor += E1000_READ_REG(hw, E1000_TORL);
	stats->tor += ((uint64_t)E1000_READ_REG(hw, E1000_TORH) << 32);
	stats->tor -= (stats->tpr - old_tpr) * ETHER_CRC_LEN;
	stats->tot += E1000_READ_REG(hw, E1000_TOTL);
	stats->tot += ((uint64_t)E1000_READ_REG(hw, E1000_TOTH) << 32);
	stats->tot -= (stats->tpt - old_tpt) * ETHER_CRC_LEN;

	stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
	stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
	stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
	stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
	stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
	stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
	stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
	stats->bptc += E1000_READ_REG(hw, E1000_BPTC);

	/* Interrupt Counts */

	stats->iac += E1000_READ_REG(hw, E1000_IAC);
	stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
	stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
	stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
	stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
	stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
	stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
	stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
	stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);

	/* Host to Card Statistics */

	stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
	stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
	stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
	stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
	stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
	stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
	stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
	stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL);
	stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32);
	stats->hgorc -= (stats->rpthc - old_rpthc) * ETHER_CRC_LEN;
	stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL);
	stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32);
	stats->hgotc -= (stats->hgptc - old_hgptc) * ETHER_CRC_LEN;
	stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
	stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
	stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);

	stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
	stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
	stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
	stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
	stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
	stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
}
static void
eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_hw_stats *stats =
		E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	igb_read_stats_registers(hw, stats);

	if (rte_stats == NULL)
		return;

	/* Rx Errors */
	rte_stats->imissed = stats->mpc;
	rte_stats->ierrors = stats->crcerrs +
		stats->rlec + stats->ruc + stats->roc +
		stats->rxerrc + stats->algnerrc + stats->cexterr;

	/* Tx Errors */
	rte_stats->oerrors = stats->ecol + stats->latecol;

	rte_stats->ipackets = stats->gprc;
	rte_stats->opackets = stats->gptc;
	rte_stats->ibytes   = stats->gorc;
	rte_stats->obytes   = stats->gotc;
}
1766 eth_igb_stats_reset(struct rte_eth_dev *dev)
1768 struct e1000_hw_stats *hw_stats =
1769 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1771 /* HW registers are cleared on read */
1772 eth_igb_stats_get(dev, NULL);
1774 /* Reset software totals */
1775 memset(hw_stats, 0, sizeof(*hw_stats));
1779 eth_igb_xstats_reset(struct rte_eth_dev *dev)
1781 struct e1000_hw_stats *stats =
1782 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1784 /* HW registers are cleared on read */
1785 eth_igb_xstats_get(dev, NULL, IGB_NB_XSTATS);
1787 /* Reset software totals */
1788 memset(stats, 0, sizeof(*stats));
1791 static int eth_igb_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1792 struct rte_eth_xstat_name *xstats_names,
1793 __rte_unused unsigned limit)
1797 if (xstats_names == NULL)
1798 return IGB_NB_XSTATS;
1800 /* Note: limit checked in rte_eth_xstats_names() */
1802 for (i = 0; i < IGB_NB_XSTATS; i++) {
1803 snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
1804 "%s", rte_igb_stats_strings[i].name);
1807 return IGB_NB_XSTATS;
1811 eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1814 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1815 struct e1000_hw_stats *hw_stats =
1816 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1819 if (n < IGB_NB_XSTATS)
1820 return IGB_NB_XSTATS;
1822 igb_read_stats_registers(hw, hw_stats);
1824 /* If this is a reset, xstats is NULL and we have cleared the
1825 * registers by reading them.
1830 /* Extended stats */
1831 for (i = 0; i < IGB_NB_XSTATS; i++) {
1833 xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
1834 rte_igb_stats_strings[i].offset);
1837 return IGB_NB_XSTATS;
1841 igbvf_read_stats_registers(struct e1000_hw *hw, struct e1000_vf_stats *hw_stats)
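/*
 * The VF statistics registers are 32-bit counters that are not
 * clear-on-read.  UPDATE_VF_STAT (defined earlier in this file) is
 * expected to add the wrap-safe delta between the current register value
 * and the stored last_* snapshot to the 64-bit software counter and then
 * refresh the snapshot.
 */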
1843 /* Good Rx packets, include VF loopback */
1844 UPDATE_VF_STAT(E1000_VFGPRC,
1845 hw_stats->last_gprc, hw_stats->gprc);
1847 /* Good Rx octets, include VF loopback */
1848 UPDATE_VF_STAT(E1000_VFGORC,
1849 hw_stats->last_gorc, hw_stats->gorc);
1851 /* Good Tx packets, include VF loopback */
1852 UPDATE_VF_STAT(E1000_VFGPTC,
1853 hw_stats->last_gptc, hw_stats->gptc);
1855 /* Good Tx octets, include VF loopback */
1856 UPDATE_VF_STAT(E1000_VFGOTC,
1857 hw_stats->last_gotc, hw_stats->gotc);
1859 /* Rx Multicast packets */
1860 UPDATE_VF_STAT(E1000_VFMPRC,
1861 hw_stats->last_mprc, hw_stats->mprc);
1863 /* Good Rx loopback packets */
1864 UPDATE_VF_STAT(E1000_VFGPRLBC,
1865 hw_stats->last_gprlbc, hw_stats->gprlbc);
1867 /* Good Rx loopback octets */
1868 UPDATE_VF_STAT(E1000_VFGORLBC,
1869 hw_stats->last_gorlbc, hw_stats->gorlbc);
1871 /* Good Tx loopback packets */
1872 UPDATE_VF_STAT(E1000_VFGPTLBC,
1873 hw_stats->last_gptlbc, hw_stats->gptlbc);
1875 /* Good Tx loopback octets */
1876 UPDATE_VF_STAT(E1000_VFGOTLBC,
1877 hw_stats->last_gotlbc, hw_stats->gotlbc);
1880 static int eth_igbvf_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1881 struct rte_eth_xstat_name *xstats_names,
1882 __rte_unused unsigned limit)
1886 if (xstats_names != NULL)
1887 for (i = 0; i < IGBVF_NB_XSTATS; i++) {
1888 snprintf(xstats_names[i].name,
1889 sizeof(xstats_names[i].name), "%s",
1890 rte_igbvf_stats_strings[i].name);
1892 return IGBVF_NB_XSTATS;
1896 eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1899 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1900 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
1901 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1904 if (n < IGBVF_NB_XSTATS)
1905 return IGBVF_NB_XSTATS;
1907 igbvf_read_stats_registers(hw, hw_stats);
1912 for (i = 0; i < IGBVF_NB_XSTATS; i++) {
1914 xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
1915 rte_igbvf_stats_strings[i].offset);
1918 return IGBVF_NB_XSTATS;
1922 eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
1924 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1925 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
1926 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1928 igbvf_read_stats_registers(hw, hw_stats);
1930 if (rte_stats == NULL)
1933 rte_stats->ipackets = hw_stats->gprc;
1934 rte_stats->ibytes = hw_stats->gorc;
1935 rte_stats->opackets = hw_stats->gptc;
1936 rte_stats->obytes = hw_stats->gotc;
1940 eth_igbvf_stats_reset(struct rte_eth_dev *dev)
1942 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*)
1943 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1945 /* Sync HW register to the last stats */
1946 eth_igbvf_stats_get(dev, NULL);
1948 /* reset HW current stats */
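/*
 * Only the counters from 'gprc' onward are zeroed; the last_* snapshot
 * fields (assuming they precede 'gprc' in struct e1000_vf_stats) must
 * survive so the next register read computes its delta against the
 * current hardware values rather than against zero.
 */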
1949 memset(&hw_stats->gprc, 0, sizeof(*hw_stats) -
1950 offsetof(struct e1000_vf_stats, gprc));
1954 eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1956 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1958 dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
1959 dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
1960 dev_info->max_mac_addrs = hw->mac.rar_entry_count;
1961 dev_info->rx_offload_capa =
1962 DEV_RX_OFFLOAD_VLAN_STRIP |
1963 DEV_RX_OFFLOAD_IPV4_CKSUM |
1964 DEV_RX_OFFLOAD_UDP_CKSUM |
1965 DEV_RX_OFFLOAD_TCP_CKSUM;
1966 dev_info->tx_offload_capa =
1967 DEV_TX_OFFLOAD_VLAN_INSERT |
1968 DEV_TX_OFFLOAD_IPV4_CKSUM |
1969 DEV_TX_OFFLOAD_UDP_CKSUM |
1970 DEV_TX_OFFLOAD_TCP_CKSUM |
1971 DEV_TX_OFFLOAD_SCTP_CKSUM |
1972 DEV_TX_OFFLOAD_TCP_TSO;
1974 switch (hw->mac.type) {
1976 dev_info->max_rx_queues = 4;
1977 dev_info->max_tx_queues = 4;
1978 dev_info->max_vmdq_pools = 0;
1982 dev_info->max_rx_queues = 16;
1983 dev_info->max_tx_queues = 16;
1984 dev_info->max_vmdq_pools = ETH_8_POOLS;
1985 dev_info->vmdq_queue_num = 16;
1989 dev_info->max_rx_queues = 8;
1990 dev_info->max_tx_queues = 8;
1991 dev_info->max_vmdq_pools = ETH_8_POOLS;
1992 dev_info->vmdq_queue_num = 8;
1996 dev_info->max_rx_queues = 8;
1997 dev_info->max_tx_queues = 8;
1998 dev_info->max_vmdq_pools = ETH_8_POOLS;
1999 dev_info->vmdq_queue_num = 8;
2003 dev_info->max_rx_queues = 8;
2004 dev_info->max_tx_queues = 8;
2008 dev_info->max_rx_queues = 4;
2009 dev_info->max_tx_queues = 4;
2010 dev_info->max_vmdq_pools = 0;
2014 dev_info->max_rx_queues = 2;
2015 dev_info->max_tx_queues = 2;
2016 dev_info->max_vmdq_pools = 0;
2020 /* Should not happen */
2023 dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t);
2024 dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
2025 dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL;
2027 dev_info->default_rxconf = (struct rte_eth_rxconf) {
2029 .pthresh = IGB_DEFAULT_RX_PTHRESH,
2030 .hthresh = IGB_DEFAULT_RX_HTHRESH,
2031 .wthresh = IGB_DEFAULT_RX_WTHRESH,
2033 .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
2037 dev_info->default_txconf = (struct rte_eth_txconf) {
2039 .pthresh = IGB_DEFAULT_TX_PTHRESH,
2040 .hthresh = IGB_DEFAULT_TX_HTHRESH,
2041 .wthresh = IGB_DEFAULT_TX_WTHRESH,
2046 dev_info->rx_desc_lim = rx_desc_lim;
2047 dev_info->tx_desc_lim = tx_desc_lim;
2049 dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
2050 ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
2054 static const uint32_t *
2055 eth_igb_supported_ptypes_get(struct rte_eth_dev *dev)
2057 static const uint32_t ptypes[] = {
2058 /* refers to igb_rxd_pkt_info_to_pkt_type() */
2061 RTE_PTYPE_L3_IPV4_EXT,
2063 RTE_PTYPE_L3_IPV6_EXT,
2067 RTE_PTYPE_TUNNEL_IP,
2068 RTE_PTYPE_INNER_L3_IPV6,
2069 RTE_PTYPE_INNER_L3_IPV6_EXT,
2070 RTE_PTYPE_INNER_L4_TCP,
2071 RTE_PTYPE_INNER_L4_UDP,
2075 if (dev->rx_pkt_burst == eth_igb_recv_pkts ||
2076 dev->rx_pkt_burst == eth_igb_recv_scattered_pkts)
2082 eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2084 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2086 dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
2087 dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
2088 dev_info->max_mac_addrs = hw->mac.rar_entry_count;
2089 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
2090 DEV_RX_OFFLOAD_IPV4_CKSUM |
2091 DEV_RX_OFFLOAD_UDP_CKSUM |
2092 DEV_RX_OFFLOAD_TCP_CKSUM;
2093 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
2094 DEV_TX_OFFLOAD_IPV4_CKSUM |
2095 DEV_TX_OFFLOAD_UDP_CKSUM |
2096 DEV_TX_OFFLOAD_TCP_CKSUM |
2097 DEV_TX_OFFLOAD_SCTP_CKSUM |
2098 DEV_TX_OFFLOAD_TCP_TSO;
2099 switch (hw->mac.type) {
2101 dev_info->max_rx_queues = 2;
2102 dev_info->max_tx_queues = 2;
2104 case e1000_vfadapt_i350:
2105 dev_info->max_rx_queues = 1;
2106 dev_info->max_tx_queues = 1;
2109 /* Should not happen */
2113 dev_info->default_rxconf = (struct rte_eth_rxconf) {
2115 .pthresh = IGB_DEFAULT_RX_PTHRESH,
2116 .hthresh = IGB_DEFAULT_RX_HTHRESH,
2117 .wthresh = IGB_DEFAULT_RX_WTHRESH,
2119 .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
2123 dev_info->default_txconf = (struct rte_eth_txconf) {
2125 .pthresh = IGB_DEFAULT_TX_PTHRESH,
2126 .hthresh = IGB_DEFAULT_TX_HTHRESH,
2127 .wthresh = IGB_DEFAULT_TX_WTHRESH,
2132 dev_info->rx_desc_lim = rx_desc_lim;
2133 dev_info->tx_desc_lim = tx_desc_lim;
2136 /* Return 0 if the link status changed, -1 if it did not change */
2138 eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2140 struct e1000_hw *hw =
2141 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2142 struct rte_eth_link link, old;
2143 int link_check, count;
2146 hw->mac.get_link_status = 1;
2148 /* possible wait-to-complete in up to 9 seconds */
2149 for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count ++) {
2150 /* Read the real link status */
2151 switch (hw->phy.media_type) {
2152 case e1000_media_type_copper:
2153 /* Do the work to read phy */
2154 e1000_check_for_link(hw);
2155 link_check = !hw->mac.get_link_status;
2158 case e1000_media_type_fiber:
2159 e1000_check_for_link(hw);
2160 link_check = (E1000_READ_REG(hw, E1000_STATUS) &
2164 case e1000_media_type_internal_serdes:
2165 e1000_check_for_link(hw);
2166 link_check = hw->mac.serdes_has_link;
2169 /* VF device is type_unknown */
2170 case e1000_media_type_unknown:
2171 eth_igbvf_link_update(hw);
2172 link_check = !hw->mac.get_link_status;
2178 if (link_check || wait_to_complete == 0)
2180 rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL);
2182 memset(&link, 0, sizeof(link));
2183 rte_igb_dev_atomic_read_link_status(dev, &link);
2186 /* Now we check if a transition has happened */
2188 uint16_t duplex, speed;
2189 hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
2190 link.link_duplex = (duplex == FULL_DUPLEX) ?
2191 ETH_LINK_FULL_DUPLEX :
2192 ETH_LINK_HALF_DUPLEX;
2193 link.link_speed = speed;
2194 link.link_status = ETH_LINK_UP;
2195 link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2196 ETH_LINK_SPEED_FIXED);
2197 } else if (!link_check) {
2198 link.link_speed = 0;
2199 link.link_duplex = ETH_LINK_HALF_DUPLEX;
2200 link.link_status = ETH_LINK_DOWN;
2201 link.link_autoneg = ETH_LINK_SPEED_FIXED;
2203 rte_igb_dev_atomic_write_link_status(dev, &link);
2206 if (old.link_status == link.link_status)
2214 * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
2215 * For ASF and Pass Through versions of f/w this means
2216 * that the driver is loaded.
2219 igb_hw_control_acquire(struct e1000_hw *hw)
2223 /* Let firmware know the driver has taken over */
2224 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2225 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
2229 * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
2230 * For ASF and Pass Through versions of f/w this means that the
2231 * driver is no longer loaded.
2234 igb_hw_control_release(struct e1000_hw *hw)
2238 /* Let firmware take over control of h/w */
2239 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2240 E1000_WRITE_REG(hw, E1000_CTRL_EXT,
2241 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
2245 * Bit of a misnomer; what this really means is
2246 * to enable OS management of the system... aka
2247 * to disable special hardware management features.
2250 igb_init_manageability(struct e1000_hw *hw)
2252 if (e1000_enable_mng_pass_thru(hw)) {
2253 uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
2254 uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
2256 /* disable hardware interception of ARP */
2257 manc &= ~(E1000_MANC_ARP_EN);
2259 /* enable receiving management packets to the host */
2260 manc |= E1000_MANC_EN_MNG2HOST;
2261 manc2h |= 1 << 5; /* Mng Port 623 */
2262 manc2h |= 1 << 6; /* Mng Port 664 */
2263 E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
2264 E1000_WRITE_REG(hw, E1000_MANC, manc);
2269 igb_release_manageability(struct e1000_hw *hw)
2271 if (e1000_enable_mng_pass_thru(hw)) {
2272 uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
2274 manc |= E1000_MANC_ARP_EN;
2275 manc &= ~E1000_MANC_EN_MNG2HOST;
2277 E1000_WRITE_REG(hw, E1000_MANC, manc);
2282 eth_igb_promiscuous_enable(struct rte_eth_dev *dev)
2284 struct e1000_hw *hw =
2285 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2288 rctl = E1000_READ_REG(hw, E1000_RCTL);
2289 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2290 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2294 eth_igb_promiscuous_disable(struct rte_eth_dev *dev)
2296 struct e1000_hw *hw =
2297 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2300 rctl = E1000_READ_REG(hw, E1000_RCTL);
2301 rctl &= (~E1000_RCTL_UPE);
2302 if (dev->data->all_multicast == 1)
2303 rctl |= E1000_RCTL_MPE;
2305 rctl &= (~E1000_RCTL_MPE);
2306 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2310 eth_igb_allmulticast_enable(struct rte_eth_dev *dev)
2312 struct e1000_hw *hw =
2313 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2316 rctl = E1000_READ_REG(hw, E1000_RCTL);
2317 rctl |= E1000_RCTL_MPE;
2318 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2322 eth_igb_allmulticast_disable(struct rte_eth_dev *dev)
2324 struct e1000_hw *hw =
2325 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2328 if (dev->data->promiscuous == 1)
2329 return; /* must remain in all_multicast mode */
2330 rctl = E1000_READ_REG(hw, E1000_RCTL);
2331 rctl &= (~E1000_RCTL_MPE);
2332 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2336 eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2338 struct e1000_hw *hw =
2339 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2340 struct e1000_vfta * shadow_vfta =
2341 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2346 vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
2347 E1000_VFTA_ENTRY_MASK);
2348 vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
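/*
 * The VFTA is an array of 32-bit words with one bit per VLAN ID.
 * Example: VLAN ID 100 -> vid_idx = 100 >> 5 = 3 and
 * vid_bit = 1 << (100 & 0x1F) = 0x10, i.e. bit 4 of VFTA[3].
 */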
2349 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
2354 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);
2356 /* update local VFTA copy */
2357 shadow_vfta->vfta[vid_idx] = vfta;
2363 eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
2364 enum rte_vlan_type vlan_type,
2367 struct e1000_hw *hw =
2368 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2371 qinq = E1000_READ_REG(hw, E1000_CTRL_EXT);
2372 qinq &= E1000_CTRL_EXT_EXT_VLAN;
2374 /* Only the outer TPID of a double VLAN can be configured */
2375 if (qinq && vlan_type == ETH_VLAN_TYPE_OUTER) {
2376 reg = E1000_READ_REG(hw, E1000_VET);
2377 reg = (reg & (~E1000_VET_VET_EXT)) |
2378 ((uint32_t)tpid << E1000_VET_VET_EXT_SHIFT);
2379 E1000_WRITE_REG(hw, E1000_VET, reg);
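/*
 * VET carries the inner TPID in bits 15:0 and the outer (extended) TPID
 * in bits 31:16 (E1000_VET_VET_EXT / E1000_VET_VET_EXT_SHIFT).  For
 * example, tpid = 0x88A8 leaves the inner TPID untouched and places
 * 0x88A8 in the upper half of the register.
 */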
2384 /* All other TPID values are read-only */
2385 PMD_DRV_LOG(ERR, "Not supported");
2391 igb_vlan_hw_filter_disable(struct rte_eth_dev *dev)
2393 struct e1000_hw *hw =
2394 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2397 /* Filter Table Disable */
2398 reg = E1000_READ_REG(hw, E1000_RCTL);
2399 reg &= ~E1000_RCTL_CFIEN;
2400 reg &= ~E1000_RCTL_VFE;
2401 E1000_WRITE_REG(hw, E1000_RCTL, reg);
2405 igb_vlan_hw_filter_enable(struct rte_eth_dev *dev)
2407 struct e1000_hw *hw =
2408 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2409 struct e1000_vfta * shadow_vfta =
2410 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2414 /* Filter Table Enable, CFI not used for packet acceptance */
2415 reg = E1000_READ_REG(hw, E1000_RCTL);
2416 reg &= ~E1000_RCTL_CFIEN;
2417 reg |= E1000_RCTL_VFE;
2418 E1000_WRITE_REG(hw, E1000_RCTL, reg);
2420 /* restore VFTA table */
2421 for (i = 0; i < IGB_VFTA_SIZE; i++)
2422 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
2426 igb_vlan_hw_strip_disable(struct rte_eth_dev *dev)
2428 struct e1000_hw *hw =
2429 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2432 /* VLAN Mode Disable */
2433 reg = E1000_READ_REG(hw, E1000_CTRL);
2434 reg &= ~E1000_CTRL_VME;
2435 E1000_WRITE_REG(hw, E1000_CTRL, reg);
2439 igb_vlan_hw_strip_enable(struct rte_eth_dev *dev)
2441 struct e1000_hw *hw =
2442 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2445 /* VLAN Mode Enable */
2446 reg = E1000_READ_REG(hw, E1000_CTRL);
2447 reg |= E1000_CTRL_VME;
2448 E1000_WRITE_REG(hw, E1000_CTRL, reg);
2452 igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
2454 struct e1000_hw *hw =
2455 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2458 /* CTRL_EXT: Extended VLAN */
2459 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
2460 reg &= ~E1000_CTRL_EXT_EXTEND_VLAN;
2461 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
2463 /* Update maximum packet length */
2464 if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
2465 E1000_WRITE_REG(hw, E1000_RLPML,
2466 dev->data->dev_conf.rxmode.max_rx_pkt_len +
2471 igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
2473 struct e1000_hw *hw =
2474 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2477 /* CTRL_EXT: Extended VLAN */
2478 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
2479 reg |= E1000_CTRL_EXT_EXTEND_VLAN;
2480 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
2482 /* Update maximum packet length */
2483 if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
2484 E1000_WRITE_REG(hw, E1000_RLPML,
2485 dev->data->dev_conf.rxmode.max_rx_pkt_len +
2490 eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2492 if(mask & ETH_VLAN_STRIP_MASK){
2493 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
2494 igb_vlan_hw_strip_enable(dev);
2496 igb_vlan_hw_strip_disable(dev);
2499 if(mask & ETH_VLAN_FILTER_MASK){
2500 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
2501 igb_vlan_hw_filter_enable(dev);
2503 igb_vlan_hw_filter_disable(dev);
2506 if(mask & ETH_VLAN_EXTEND_MASK){
2507 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
2508 igb_vlan_hw_extend_enable(dev);
2510 igb_vlan_hw_extend_disable(dev);
2516 * It enables the interrupt mask and then enables the interrupt.
2519 * Pointer to struct rte_eth_dev.
2522 * - On success, zero.
2523 * - On failure, a negative value.
2526 eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
2528 struct e1000_interrupt *intr =
2529 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2531 intr->mask |= E1000_ICR_LSC;
2536 /* It clears the interrupt causes and enables the interrupt.
2537 * It will be called only once during NIC initialization.
2540 * Pointer to struct rte_eth_dev.
2543 * - On success, zero.
2544 * - On failure, a negative value.
2546 static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev)
2548 uint32_t mask, regval;
2549 struct e1000_hw *hw =
2550 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2551 struct rte_eth_dev_info dev_info;
2553 memset(&dev_info, 0, sizeof(dev_info));
2554 eth_igb_infos_get(dev, &dev_info);
2556 mask = 0xFFFFFFFF >> (32 - dev_info.max_rx_queues);
2557 regval = E1000_READ_REG(hw, E1000_EIMS);
2558 E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
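/*
 * One EIMS bit is set per RX queue.  Example: with max_rx_queues = 8 the
 * mask is 0xFFFFFFFF >> (32 - 8) = 0xFF, enabling extended interrupts
 * for queues 0-7 on top of whatever was already enabled.
 */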
2564 * It reads ICR and gets the interrupt causes, checks them and sets a bit flag
2565 * to update link status.
2568 * Pointer to struct rte_eth_dev.
2571 * - On success, zero.
2572 * - On failure, a negative value.
2575 eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
2578 struct e1000_hw *hw =
2579 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2580 struct e1000_interrupt *intr =
2581 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2583 igb_intr_disable(hw);
2585 /* read-on-clear nic registers here */
2586 icr = E1000_READ_REG(hw, E1000_ICR);
2589 if (icr & E1000_ICR_LSC) {
2590 intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
2593 if (icr & E1000_ICR_VMMB)
2594 intr->flags |= E1000_FLAG_MAILBOX;
2600 * It executes link_update after knowing an interrupt is present.
2603 * Pointer to struct rte_eth_dev.
2606 * - On success, zero.
2607 * - On failure, a negative value.
2610 eth_igb_interrupt_action(struct rte_eth_dev *dev)
2612 struct e1000_hw *hw =
2613 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2614 struct e1000_interrupt *intr =
2615 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2616 uint32_t tctl, rctl;
2617 struct rte_eth_link link;
2620 if (intr->flags & E1000_FLAG_MAILBOX) {
2621 igb_pf_mbx_process(dev);
2622 intr->flags &= ~E1000_FLAG_MAILBOX;
2625 igb_intr_enable(dev);
2626 rte_intr_enable(&(dev->pci_dev->intr_handle));
2628 if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
2629 intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
2631 /* set get_link_status to check register later */
2632 hw->mac.get_link_status = 1;
2633 ret = eth_igb_link_update(dev, 0);
2635 /* check if link has changed */
2639 memset(&link, 0, sizeof(link));
2640 rte_igb_dev_atomic_read_link_status(dev, &link);
2641 if (link.link_status) {
2643 " Port %d: Link Up - speed %u Mbps - %s",
2645 (unsigned)link.link_speed,
2646 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
2647 "full-duplex" : "half-duplex");
2649 PMD_INIT_LOG(INFO, " Port %d: Link Down",
2650 dev->data->port_id);
2653 PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
2654 dev->pci_dev->addr.domain,
2655 dev->pci_dev->addr.bus,
2656 dev->pci_dev->addr.devid,
2657 dev->pci_dev->addr.function);
2658 tctl = E1000_READ_REG(hw, E1000_TCTL);
2659 rctl = E1000_READ_REG(hw, E1000_RCTL);
2660 if (link.link_status) {
2662 tctl |= E1000_TCTL_EN;
2663 rctl |= E1000_RCTL_EN;
2666 tctl &= ~E1000_TCTL_EN;
2667 rctl &= ~E1000_RCTL_EN;
2669 E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2670 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2671 E1000_WRITE_FLUSH(hw);
2672 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
2679 * Interrupt handler which shall be registered at first.
2682 * Pointer to interrupt handle.
2684 * The address of the parameter (struct rte_eth_dev *) registered before.
2690 eth_igb_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
2693 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2695 eth_igb_interrupt_get_status(dev);
2696 eth_igb_interrupt_action(dev);
2700 eth_igbvf_interrupt_get_status(struct rte_eth_dev *dev)
2703 struct e1000_hw *hw =
2704 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2705 struct e1000_interrupt *intr =
2706 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2708 igbvf_intr_disable(hw);
2710 /* read-on-clear nic registers here */
2711 eicr = E1000_READ_REG(hw, E1000_EICR);
2714 if (eicr == E1000_VTIVAR_MISC_MAILBOX)
2715 intr->flags |= E1000_FLAG_MAILBOX;
2720 void igbvf_mbx_process(struct rte_eth_dev *dev)
2722 struct e1000_hw *hw =
2723 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2724 struct e1000_mbx_info *mbx = &hw->mbx;
2727 if (mbx->ops.read(hw, &in_msg, 1, 0))
2730 /* PF reset VF event */
2731 if (in_msg == E1000_PF_CONTROL_MSG)
2732 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET);
2736 eth_igbvf_interrupt_action(struct rte_eth_dev *dev)
2738 struct e1000_interrupt *intr =
2739 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2741 if (intr->flags & E1000_FLAG_MAILBOX) {
2742 igbvf_mbx_process(dev);
2743 intr->flags &= ~E1000_FLAG_MAILBOX;
2746 igbvf_intr_enable(dev);
2747 rte_intr_enable(&dev->pci_dev->intr_handle);
2753 eth_igbvf_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
2756 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2758 eth_igbvf_interrupt_get_status(dev);
2759 eth_igbvf_interrupt_action(dev);
2763 eth_igb_led_on(struct rte_eth_dev *dev)
2765 struct e1000_hw *hw;
2767 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2768 return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
2772 eth_igb_led_off(struct rte_eth_dev *dev)
2774 struct e1000_hw *hw;
2776 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2777 return e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
2781 eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2783 struct e1000_hw *hw;
2788 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2789 fc_conf->pause_time = hw->fc.pause_time;
2790 fc_conf->high_water = hw->fc.high_water;
2791 fc_conf->low_water = hw->fc.low_water;
2792 fc_conf->send_xon = hw->fc.send_xon;
2793 fc_conf->autoneg = hw->mac.autoneg;
2796 * Return rx_pause and tx_pause status according to actual setting of
2797 * the TFCE and RFCE bits in the CTRL register.
2799 ctrl = E1000_READ_REG(hw, E1000_CTRL);
2800 if (ctrl & E1000_CTRL_TFCE)
2805 if (ctrl & E1000_CTRL_RFCE)
2810 if (rx_pause && tx_pause)
2811 fc_conf->mode = RTE_FC_FULL;
2813 fc_conf->mode = RTE_FC_RX_PAUSE;
2815 fc_conf->mode = RTE_FC_TX_PAUSE;
2817 fc_conf->mode = RTE_FC_NONE;
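/*
 * Summary of the mapping above: TFCE and RFCE both set -> RTE_FC_FULL,
 * only RFCE -> RTE_FC_RX_PAUSE, only TFCE -> RTE_FC_TX_PAUSE,
 * neither -> RTE_FC_NONE.
 */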
2823 eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2825 struct e1000_hw *hw;
2827 enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
2833 uint32_t rx_buf_size;
2834 uint32_t max_high_water;
2837 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2838 if (fc_conf->autoneg != hw->mac.autoneg)
2840 rx_buf_size = igb_get_rx_buffer_size(hw);
2841 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2843 /* At least reserve one Ethernet frame for watermark */
2844 max_high_water = rx_buf_size - ETHER_MAX_LEN;
2845 if ((fc_conf->high_water > max_high_water) ||
2846 (fc_conf->high_water < fc_conf->low_water)) {
2847 PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
2848 PMD_INIT_LOG(ERR, "high water must be <= 0x%x", max_high_water);
2852 hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
2853 hw->fc.pause_time = fc_conf->pause_time;
2854 hw->fc.high_water = fc_conf->high_water;
2855 hw->fc.low_water = fc_conf->low_water;
2856 hw->fc.send_xon = fc_conf->send_xon;
2858 err = e1000_setup_link_generic(hw);
2859 if (err == E1000_SUCCESS) {
2861 /* check if we want to forward MAC frames - the driver doesn't have a native
2862 * capability to do that, so we'll write the registers ourselves */
2864 rctl = E1000_READ_REG(hw, E1000_RCTL);
2866 /* set or clear MFLCN.PMCF bit depending on configuration */
2867 if (fc_conf->mac_ctrl_frame_fwd != 0)
2868 rctl |= E1000_RCTL_PMCF;
2870 rctl &= ~E1000_RCTL_PMCF;
2872 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2873 E1000_WRITE_FLUSH(hw);
2878 PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
2882 #define E1000_RAH_POOLSEL_SHIFT (18)
2884 eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
2885 uint32_t index, __rte_unused uint32_t pool)
2887 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2890 e1000_rar_set(hw, mac_addr->addr_bytes, index);
2891 rah = E1000_READ_REG(hw, E1000_RAH(index));
2892 rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool));
2893 E1000_WRITE_REG(hw, E1000_RAH(index), rah);
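/*
 * Besides the MAC address itself, the pool-select bits in RAH (starting
 * at E1000_RAH_POOLSEL_SHIFT) steer traffic matching this receive-address
 * entry to the given VMDq pool/VF.  Example: pool = 2 sets bit 20 of
 * RAH(index).
 */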
2897 eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
2899 uint8_t addr[ETHER_ADDR_LEN];
2900 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2902 memset(addr, 0, sizeof(addr));
2904 e1000_rar_set(hw, addr, index);
2908 eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
2909 struct ether_addr *addr)
2911 eth_igb_rar_clear(dev, 0);
2913 eth_igb_rar_set(dev, (void *)addr, 0, 0);
2916 * Virtual Function operations
2919 igbvf_intr_disable(struct e1000_hw *hw)
2921 PMD_INIT_FUNC_TRACE();
2923 /* Clear interrupt mask to stop interrupts from being generated */
2924 E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF);
2926 E1000_WRITE_FLUSH(hw);
2930 igbvf_stop_adapter(struct rte_eth_dev *dev)
2934 struct rte_eth_dev_info dev_info;
2935 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2937 memset(&dev_info, 0, sizeof(dev_info));
2938 eth_igbvf_infos_get(dev, &dev_info);
2940 /* Clear interrupt mask to stop interrupts from being generated */
2941 igbvf_intr_disable(hw);
2943 /* Clear any pending interrupts, flush previous writes */
2944 E1000_READ_REG(hw, E1000_EICR);
2946 /* Disable the transmit unit. Each queue must be disabled. */
2947 for (i = 0; i < dev_info.max_tx_queues; i++)
2948 E1000_WRITE_REG(hw, E1000_TXDCTL(i), E1000_TXDCTL_SWFLSH);
2950 /* Disable the receive unit by stopping each queue */
2951 for (i = 0; i < dev_info.max_rx_queues; i++) {
2952 reg_val = E1000_READ_REG(hw, E1000_RXDCTL(i));
2953 reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE;
2954 E1000_WRITE_REG(hw, E1000_RXDCTL(i), reg_val);
2955 while (E1000_READ_REG(hw, E1000_RXDCTL(i)) & E1000_RXDCTL_QUEUE_ENABLE)
2959 /* flush all queue disables */
2960 E1000_WRITE_FLUSH(hw);
2964 static int eth_igbvf_link_update(struct e1000_hw *hw)
2966 struct e1000_mbx_info *mbx = &hw->mbx;
2967 struct e1000_mac_info *mac = &hw->mac;
2968 int ret_val = E1000_SUCCESS;
2970 PMD_INIT_LOG(DEBUG, "e1000_check_for_link_vf");
2973 * We only want to run this if a reset has been asserted;
2974 * in this case that could mean a link change, device reset,
2975 * or a virtual function reset
2978 /* If we were hit with a reset or timeout drop the link */
2979 if (!e1000_check_for_rst(hw, 0) || !mbx->timeout)
2980 mac->get_link_status = TRUE;
2982 if (!mac->get_link_status)
2985 /* if link status is down no point in checking to see if pf is up */
2986 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
2989 /* if we passed all the tests above then the link is up and we no
2990 * longer need to check for link */
2991 mac->get_link_status = FALSE;
2999 igbvf_dev_configure(struct rte_eth_dev *dev)
3001 struct rte_eth_conf* conf = &dev->data->dev_conf;
3003 PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
3004 dev->data->port_id);
3007 * VF has no ability to enable/disable HW CRC
3008 * Keep the persistent behavior the same as Host PF
3010 #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
3011 if (!conf->rxmode.hw_strip_crc) {
3012 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
3013 conf->rxmode.hw_strip_crc = 1;
3016 if (conf->rxmode.hw_strip_crc) {
3017 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
3018 conf->rxmode.hw_strip_crc = 0;
3026 igbvf_dev_start(struct rte_eth_dev *dev)
3028 struct e1000_hw *hw =
3029 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3030 struct e1000_adapter *adapter =
3031 E1000_DEV_PRIVATE(dev->data->dev_private);
3033 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
3034 uint32_t intr_vector = 0;
3036 PMD_INIT_FUNC_TRACE();
3038 hw->mac.ops.reset_hw(hw);
3039 adapter->stopped = 0;
3042 igbvf_set_vfta_all(dev,1);
3044 eth_igbvf_tx_init(dev);
3046 /* This can fail when allocating mbufs for descriptor rings */
3047 ret = eth_igbvf_rx_init(dev);
3049 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
3050 igb_dev_clear_queues(dev);
3054 /* check and configure queue intr-vector mapping */
3055 if (dev->data->dev_conf.intr_conf.rxq != 0) {
3056 intr_vector = dev->data->nb_rx_queues;
3057 ret = rte_intr_efd_enable(intr_handle, intr_vector);
3062 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
3063 intr_handle->intr_vec =
3064 rte_zmalloc("intr_vec",
3065 dev->data->nb_rx_queues * sizeof(int), 0);
3066 if (!intr_handle->intr_vec) {
3067 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
3068 " intr_vec\n", dev->data->nb_rx_queues);
3073 eth_igbvf_configure_msix_intr(dev);
3075 /* enable uio/vfio intr/eventfd mapping */
3076 rte_intr_enable(intr_handle);
3078 /* resume enabled intr since hw reset */
3079 igbvf_intr_enable(dev);
3085 igbvf_dev_stop(struct rte_eth_dev *dev)
3087 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
3089 PMD_INIT_FUNC_TRACE();
3091 igbvf_stop_adapter(dev);
3094 * Clear what we set, but still keep shadow_vfta so it can be
3095 * restored after the device starts
3097 igbvf_set_vfta_all(dev,0);
3099 igb_dev_clear_queues(dev);
3101 /* disable intr eventfd mapping */
3102 rte_intr_disable(intr_handle);
3104 /* Clean datapath event and queue/vec mapping */
3105 rte_intr_efd_disable(intr_handle);
3106 if (intr_handle->intr_vec) {
3107 rte_free(intr_handle->intr_vec);
3108 intr_handle->intr_vec = NULL;
3113 igbvf_dev_close(struct rte_eth_dev *dev)
3115 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3116 struct e1000_adapter *adapter =
3117 E1000_DEV_PRIVATE(dev->data->dev_private);
3118 struct ether_addr addr;
3120 PMD_INIT_FUNC_TRACE();
3124 igbvf_dev_stop(dev);
3125 adapter->stopped = 1;
3126 igb_dev_free_queues(dev);
3129 * reprogram the RAR with a zero mac address,
3130 * to ensure that the VF traffic goes to the PF
3131 * after stop, close and detach of the VF.
3134 memset(&addr, 0, sizeof(addr));
3135 igbvf_default_mac_addr_set(dev, &addr);
3139 igbvf_promiscuous_enable(struct rte_eth_dev *dev)
3141 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3143 /* Set both unicast and multicast promisc */
3144 e1000_promisc_set_vf(hw, e1000_promisc_enabled);
3148 igbvf_promiscuous_disable(struct rte_eth_dev *dev)
3150 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3152 /* If in allmulticast mode leave multicast promisc */
3153 if (dev->data->all_multicast == 1)
3154 e1000_promisc_set_vf(hw, e1000_promisc_multicast);
3156 e1000_promisc_set_vf(hw, e1000_promisc_disabled);
3160 igbvf_allmulticast_enable(struct rte_eth_dev *dev)
3162 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3164 /* In promiscuous mode multicast promisc already set */
3165 if (dev->data->promiscuous == 0)
3166 e1000_promisc_set_vf(hw, e1000_promisc_multicast);
3170 igbvf_allmulticast_disable(struct rte_eth_dev *dev)
3172 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3174 /* In promiscuous mode leave multicast promisc enabled */
3175 if (dev->data->promiscuous == 0)
3176 e1000_promisc_set_vf(hw, e1000_promisc_disabled);
3179 static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on)
3181 struct e1000_mbx_info *mbx = &hw->mbx;
3185 /* After setting the VLAN, VLAN stripping will also be enabled in the igb driver */
3186 msgbuf[0] = E1000_VF_SET_VLAN;
3188 /* Setting the 8-bit MSG INFO field to TRUE indicates "add" */
3190 msgbuf[0] |= E1000_VF_SET_VLAN_ADD;
3192 err = mbx->ops.write_posted(hw, msgbuf, 2, 0);
3196 err = mbx->ops.read_posted(hw, msgbuf, 2, 0);
3200 msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
3201 if (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK))
3208 static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on)
3210 struct e1000_hw *hw =
3211 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3212 struct e1000_vfta * shadow_vfta =
3213 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
3214 int i = 0, j = 0, vfta = 0, mask = 1;
3216 for (i = 0; i < IGB_VFTA_SIZE; i++){
3217 vfta = shadow_vfta->vfta[i];
3220 for (j = 0; j < 32; j++){
3223 (uint16_t)((i<<5)+j), on);
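/*
 * Bit j of shadow VFTA word i corresponds to VLAN ID (i << 5) + j, so
 * every set bit is replayed to the PF through the mailbox one VLAN at a
 * time.  Example: i = 3, j = 4 -> VLAN ID 100.
 */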
3232 igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3234 struct e1000_hw *hw =
3235 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3236 struct e1000_vfta * shadow_vfta =
3237 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
3238 uint32_t vid_idx = 0;
3239 uint32_t vid_bit = 0;
3242 PMD_INIT_FUNC_TRACE();
3244 /* vind is not used in the VF driver; set to 0 (see ixgbe_set_vfta_vf) */
3245 ret = igbvf_set_vfta(hw, vlan_id, !!on);
3247 PMD_INIT_LOG(ERR, "Unable to set VF vlan");
3250 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
3251 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
3253 /* Save what we set and restore it after device reset */
3255 shadow_vfta->vfta[vid_idx] |= vid_bit;
3257 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
3263 igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr)
3265 struct e1000_hw *hw =
3266 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3268 /* index is not used by rar_set() */
3269 hw->mac.ops.rar_set(hw, (void *)addr, 0);
3274 eth_igb_rss_reta_update(struct rte_eth_dev *dev,
3275 struct rte_eth_rss_reta_entry64 *reta_conf,
3280 uint16_t idx, shift;
3281 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3283 if (reta_size != ETH_RSS_RETA_SIZE_128) {
3284 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3285 "(%d) doesn't match the number hardware can supported "
3286 "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
3290 for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
3291 idx = i / RTE_RETA_GROUP_SIZE;
3292 shift = i % RTE_RETA_GROUP_SIZE;
3293 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
3297 if (mask == IGB_4_BIT_MASK)
3300 r = E1000_READ_REG(hw, E1000_RETA(i >> 2));
3301 for (j = 0, reta = 0; j < IGB_4_BIT_WIDTH; j++) {
3302 if (mask & (0x1 << j))
3303 reta |= reta_conf[idx].reta[shift + j] <<
3306 reta |= r & (IGB_8_BIT_MASK << (CHAR_BIT * j));
3308 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta);
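/*
 * The 128-entry redirection table packs four 8-bit entries into each
 * 32-bit RETA register, so every pass of this loop handles
 * IGB_4_BIT_WIDTH (4) entries and writes RETA(i >> 2).  Entries whose
 * mask bit is clear keep the value read back in 'r'.
 */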
3315 eth_igb_rss_reta_query(struct rte_eth_dev *dev,
3316 struct rte_eth_rss_reta_entry64 *reta_conf,
3321 uint16_t idx, shift;
3322 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3324 if (reta_size != ETH_RSS_RETA_SIZE_128) {
3325 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3326 "(%d) doesn't match the number hardware can supported "
3327 "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
3331 for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
3332 idx = i / RTE_RETA_GROUP_SIZE;
3333 shift = i % RTE_RETA_GROUP_SIZE;
3334 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
3338 reta = E1000_READ_REG(hw, E1000_RETA(i >> 2));
3339 for (j = 0; j < IGB_4_BIT_WIDTH; j++) {
3340 if (mask & (0x1 << j))
3341 reta_conf[idx].reta[shift + j] =
3342 ((reta >> (CHAR_BIT * j)) &
3350 #define MAC_TYPE_FILTER_SUP(type) do {\
3351 if ((type) != e1000_82580 && (type) != e1000_i350 &&\
3352 (type) != e1000_82576)\
3357 eth_igb_syn_filter_set(struct rte_eth_dev *dev,
3358 struct rte_eth_syn_filter *filter,
3361 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3362 uint32_t synqf, rfctl;
3364 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
3367 synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
3370 if (synqf & E1000_SYN_FILTER_ENABLE)
3373 synqf = (uint32_t)(((filter->queue << E1000_SYN_FILTER_QUEUE_SHIFT) &
3374 E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE);
3376 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
3377 if (filter->hig_pri)
3378 rfctl |= E1000_RFCTL_SYNQFP;
3380 rfctl &= ~E1000_RFCTL_SYNQFP;
3382 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
3384 if (!(synqf & E1000_SYN_FILTER_ENABLE))
3389 E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
3390 E1000_WRITE_FLUSH(hw);
3395 eth_igb_syn_filter_get(struct rte_eth_dev *dev,
3396 struct rte_eth_syn_filter *filter)
3398 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3399 uint32_t synqf, rfctl;
3401 synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
3402 if (synqf & E1000_SYN_FILTER_ENABLE) {
3403 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
3404 filter->hig_pri = (rfctl & E1000_RFCTL_SYNQFP) ? 1 : 0;
3405 filter->queue = (uint8_t)((synqf & E1000_SYN_FILTER_QUEUE) >>
3406 E1000_SYN_FILTER_QUEUE_SHIFT);
3414 eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
3415 enum rte_filter_op filter_op,
3418 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3421 MAC_TYPE_FILTER_SUP(hw->mac.type);
3423 if (filter_op == RTE_ETH_FILTER_NOP)
3427 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
3432 switch (filter_op) {
3433 case RTE_ETH_FILTER_ADD:
3434 ret = eth_igb_syn_filter_set(dev,
3435 (struct rte_eth_syn_filter *)arg,
3438 case RTE_ETH_FILTER_DELETE:
3439 ret = eth_igb_syn_filter_set(dev,
3440 (struct rte_eth_syn_filter *)arg,
3443 case RTE_ETH_FILTER_GET:
3444 ret = eth_igb_syn_filter_get(dev,
3445 (struct rte_eth_syn_filter *)arg);
3448 PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
3456 #define MAC_TYPE_FILTER_SUP_EXT(type) do {\
3457 if ((type) != e1000_82580 && (type) != e1000_i350)\
3461 /* translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info*/
3463 ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter,
3464 struct e1000_2tuple_filter_info *filter_info)
3466 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
3468 if (filter->priority > E1000_2TUPLE_MAX_PRI)
3469 return -EINVAL; /* priority is out of range. */
3470 if (filter->tcp_flags > TCP_FLAG_ALL)
3471 return -EINVAL; /* flags are invalid. */
3473 switch (filter->dst_port_mask) {
3475 filter_info->dst_port_mask = 0;
3476 filter_info->dst_port = filter->dst_port;
3479 filter_info->dst_port_mask = 1;
3482 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
3486 switch (filter->proto_mask) {
3488 filter_info->proto_mask = 0;
3489 filter_info->proto = filter->proto;
3492 filter_info->proto_mask = 1;
3495 PMD_DRV_LOG(ERR, "invalid protocol mask.");
3499 filter_info->priority = (uint8_t)filter->priority;
3500 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
3501 filter_info->tcp_flags = filter->tcp_flags;
3503 filter_info->tcp_flags = 0;
3508 static inline struct e1000_2tuple_filter *
3509 igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list,
3510 struct e1000_2tuple_filter_info *key)
3512 struct e1000_2tuple_filter *it;
3514 TAILQ_FOREACH(it, filter_list, entries) {
3515 if (memcmp(key, &it->filter_info,
3516 sizeof(struct e1000_2tuple_filter_info)) == 0) {
3524 * igb_add_2tuple_filter - add a 2tuple filter
3527 * dev: Pointer to struct rte_eth_dev.
3528 * ntuple_filter: pointer to the filter that will be added.
3531 * - On success, zero.
3532 * - On failure, a negative value.
3535 igb_add_2tuple_filter(struct rte_eth_dev *dev,
3536 struct rte_eth_ntuple_filter *ntuple_filter)
3538 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3539 struct e1000_filter_info *filter_info =
3540 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3541 struct e1000_2tuple_filter *filter;
3542 uint32_t ttqf = E1000_TTQF_DISABLE_MASK;
3543 uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP;
3546 filter = rte_zmalloc("e1000_2tuple_filter",
3547 sizeof(struct e1000_2tuple_filter), 0);
3551 ret = ntuple_filter_to_2tuple(ntuple_filter,
3552 &filter->filter_info);
3557 if (igb_2tuple_filter_lookup(&filter_info->twotuple_list,
3558 &filter->filter_info) != NULL) {
3559 PMD_DRV_LOG(ERR, "filter exists.");
3563 filter->queue = ntuple_filter->queue;
3566 * look for an unused 2tuple filter index,
3567 * and insert the filter into the list.
3569 for (i = 0; i < E1000_MAX_TTQF_FILTERS; i++) {
3570 if (!(filter_info->twotuple_mask & (1 << i))) {
3571 filter_info->twotuple_mask |= 1 << i;
3573 TAILQ_INSERT_TAIL(&filter_info->twotuple_list,
3579 if (i >= E1000_MAX_TTQF_FILTERS) {
3580 PMD_DRV_LOG(ERR, "2tuple filters are full.");
3585 imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
3586 if (filter->filter_info.dst_port_mask == 1) /* 1b means do not compare. */
3587 imir |= E1000_IMIR_PORT_BP;
3589 imir &= ~E1000_IMIR_PORT_BP;
3591 imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
3593 ttqf |= E1000_TTQF_QUEUE_ENABLE;
3594 ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT);
3595 ttqf |= (uint32_t)(filter->filter_info.proto & E1000_TTQF_PROTOCOL_MASK);
3596 if (filter->filter_info.proto_mask == 0)
3597 ttqf &= ~E1000_TTQF_MASK_ENABLE;
3599 /* tcp flags bits setting. */
3600 if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
3601 if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
3602 imir_ext |= E1000_IMIREXT_CTRL_URG;
3603 if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
3604 imir_ext |= E1000_IMIREXT_CTRL_ACK;
3605 if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
3606 imir_ext |= E1000_IMIREXT_CTRL_PSH;
3607 if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
3608 imir_ext |= E1000_IMIREXT_CTRL_RST;
3609 if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
3610 imir_ext |= E1000_IMIREXT_CTRL_SYN;
3611 if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
3612 imir_ext |= E1000_IMIREXT_CTRL_FIN;
3614 imir_ext |= E1000_IMIREXT_CTRL_BP;
3615 E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
3616 E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf);
3617 E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
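/*
 * Register roles for the 2-tuple filter programmed above: IMIR holds the
 * destination port and priority, IMIREXT holds the TCP-flag match (or
 * bypass) controls, and TTQF holds the protocol match, target queue and
 * the queue-enable bit.
 */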
3622 * igb_remove_2tuple_filter - remove a 2tuple filter
3625 * dev: Pointer to struct rte_eth_dev.
3626 * ntuple_filter: pointer to the filter that will be removed.
3629 * - On success, zero.
3630 * - On failure, a negative value.
3633 igb_remove_2tuple_filter(struct rte_eth_dev *dev,
3634 struct rte_eth_ntuple_filter *ntuple_filter)
3636 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3637 struct e1000_filter_info *filter_info =
3638 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3639 struct e1000_2tuple_filter_info filter_2tuple;
3640 struct e1000_2tuple_filter *filter;
3643 memset(&filter_2tuple, 0, sizeof(struct e1000_2tuple_filter_info));
3644 ret = ntuple_filter_to_2tuple(ntuple_filter,
3649 filter = igb_2tuple_filter_lookup(&filter_info->twotuple_list,
3651 if (filter == NULL) {
3652 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3656 filter_info->twotuple_mask &= ~(1 << filter->index);
3657 TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries);
3660 E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK);
3661 E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
3662 E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
3666 static inline struct e1000_flex_filter *
3667 eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list,
3668 struct e1000_flex_filter_info *key)
3670 struct e1000_flex_filter *it;
3672 TAILQ_FOREACH(it, filter_list, entries) {
3673 if (memcmp(key, &it->filter_info,
3674 sizeof(struct e1000_flex_filter_info)) == 0)
3682 eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
3683 struct rte_eth_flex_filter *filter,
3686 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3687 struct e1000_filter_info *filter_info =
3688 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3689 struct e1000_flex_filter *flex_filter, *it;
3690 uint32_t wufc, queueing, mask;
3692 uint8_t shift, i, j = 0;
3694 flex_filter = rte_zmalloc("e1000_flex_filter",
3695 sizeof(struct e1000_flex_filter), 0);
3696 if (flex_filter == NULL)
3699 flex_filter->filter_info.len = filter->len;
3700 flex_filter->filter_info.priority = filter->priority;
3701 memcpy(flex_filter->filter_info.dwords, filter->bytes, filter->len);
3702 for (i = 0; i < RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT; i++) {
3704 /* reverse bits in the flex filter's mask */
3705 for (shift = 0; shift < CHAR_BIT; shift++) {
3706 if (filter->mask[i] & (0x01 << shift))
3707 mask |= (0x80 >> shift);
3709 flex_filter->filter_info.mask[i] = mask;
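/*
 * The hardware expects each mask byte with its bit order reversed
 * relative to the rte_eth_flex_filter layout, hence the per-byte
 * reversal above.  Example: an input mask byte of 0x01 is stored
 * as 0x80.
 */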
3712 wufc = E1000_READ_REG(hw, E1000_WUFC);
3713 if (flex_filter->index < E1000_MAX_FHFT)
3714 reg_off = E1000_FHFT(flex_filter->index);
3716 reg_off = E1000_FHFT_EXT(flex_filter->index - E1000_MAX_FHFT);
3719 if (eth_igb_flex_filter_lookup(&filter_info->flex_list,
3720 &flex_filter->filter_info) != NULL) {
3721 PMD_DRV_LOG(ERR, "filter exists.");
3722 rte_free(flex_filter);
3725 flex_filter->queue = filter->queue;
3727 * look for an unused flex filter index
3728 * and insert the filter into the list.
3730 for (i = 0; i < E1000_MAX_FLEX_FILTERS; i++) {
3731 if (!(filter_info->flex_mask & (1 << i))) {
3732 filter_info->flex_mask |= 1 << i;
3733 flex_filter->index = i;
3734 TAILQ_INSERT_TAIL(&filter_info->flex_list,
3740 if (i >= E1000_MAX_FLEX_FILTERS) {
3741 PMD_DRV_LOG(ERR, "flex filters are full.");
3742 rte_free(flex_filter);
3746 E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
3747 (E1000_WUFC_FLX0 << flex_filter->index));
3748 queueing = filter->len |
3749 (filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
3750 (filter->priority << E1000_FHFT_QUEUEING_PRIO_SHIFT);
3751 E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET,
3753 for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) {
3754 E1000_WRITE_REG(hw, reg_off,
3755 flex_filter->filter_info.dwords[j]);
3756 reg_off += sizeof(uint32_t);
3757 E1000_WRITE_REG(hw, reg_off,
3758 flex_filter->filter_info.dwords[++j]);
3759 reg_off += sizeof(uint32_t);
3760 E1000_WRITE_REG(hw, reg_off,
3761 (uint32_t)flex_filter->filter_info.mask[i]);
3762 reg_off += sizeof(uint32_t) * 2;
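/*
 * Each iteration fills one 8-byte row of the Flexible Host Filter Table:
 * two dwords of pattern data, one dword with that row's byte-enable
 * mask, and one reserved dword that the final reg_off increment skips.
 */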
3766 it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
3767 &flex_filter->filter_info);
3769 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3770 rte_free(flex_filter);
3774 for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
3775 E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);
3776 E1000_WRITE_REG(hw, E1000_WUFC, wufc &
3777 (~(E1000_WUFC_FLX0 << it->index)));
3779 filter_info->flex_mask &= ~(1 << it->index);
3780 TAILQ_REMOVE(&filter_info->flex_list, it, entries);
3782 rte_free(flex_filter);
3789 eth_igb_get_flex_filter(struct rte_eth_dev *dev,
3790 struct rte_eth_flex_filter *filter)
3792 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3793 struct e1000_filter_info *filter_info =
3794 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3795 struct e1000_flex_filter flex_filter, *it;
3796 uint32_t wufc, queueing, wufc_en = 0;
3798 memset(&flex_filter, 0, sizeof(struct e1000_flex_filter));
3799 flex_filter.filter_info.len = filter->len;
3800 flex_filter.filter_info.priority = filter->priority;
3801 memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len);
3802 memcpy(flex_filter.filter_info.mask, filter->mask,
3803 RTE_ALIGN(filter->len, sizeof(char)) / sizeof(char));
3805 it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
3806 &flex_filter.filter_info);
3808 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3812 wufc = E1000_READ_REG(hw, E1000_WUFC);
3813 wufc_en = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << it->index);
3815 if ((wufc & wufc_en) == wufc_en) {
3816 uint32_t reg_off = 0;
3817 if (it->index < E1000_MAX_FHFT)
3818 reg_off = E1000_FHFT(it->index);
3820 reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);
3822 queueing = E1000_READ_REG(hw,
3823 reg_off + E1000_FHFT_QUEUEING_OFFSET);
3824 filter->len = queueing & E1000_FHFT_QUEUEING_LEN;
3825 filter->priority = (queueing & E1000_FHFT_QUEUEING_PRIO) >>
3826 E1000_FHFT_QUEUEING_PRIO_SHIFT;
3827 filter->queue = (queueing & E1000_FHFT_QUEUEING_QUEUE) >>
3828 E1000_FHFT_QUEUEING_QUEUE_SHIFT;
3835 eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
3836 enum rte_filter_op filter_op,
3839 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3840 struct rte_eth_flex_filter *filter;
3843 MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
3845 if (filter_op == RTE_ETH_FILTER_NOP)
3849 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
3854 filter = (struct rte_eth_flex_filter *)arg;
3855 if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN
3856 || filter->len % sizeof(uint64_t) != 0) {
3857 PMD_DRV_LOG(ERR, "filter's length is out of range");
3860 if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
3861 PMD_DRV_LOG(ERR, "filter's priority is out of range");
3865 switch (filter_op) {
3866 case RTE_ETH_FILTER_ADD:
3867 ret = eth_igb_add_del_flex_filter(dev, filter, TRUE);
3869 case RTE_ETH_FILTER_DELETE:
3870 ret = eth_igb_add_del_flex_filter(dev, filter, FALSE);
3872 case RTE_ETH_FILTER_GET:
3873 ret = eth_igb_get_flex_filter(dev, filter);
3876 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
3884 /* translate elements in struct rte_eth_ntuple_filter to struct e1000_5tuple_filter_info*/
3886 ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter,
3887 struct e1000_5tuple_filter_info *filter_info)
3889 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576)
3891 if (filter->priority > E1000_2TUPLE_MAX_PRI)
3892 return -EINVAL; /* priority is out of range. */
3893 if (filter->tcp_flags > TCP_FLAG_ALL)
3894 return -EINVAL; /* flags are invalid. */
3896 switch (filter->dst_ip_mask) {
3898 filter_info->dst_ip_mask = 0;
3899 filter_info->dst_ip = filter->dst_ip;
3902 filter_info->dst_ip_mask = 1;
3905 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
3909 switch (filter->src_ip_mask) {
3911 filter_info->src_ip_mask = 0;
3912 filter_info->src_ip = filter->src_ip;
3915 filter_info->src_ip_mask = 1;
3918 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
3922 switch (filter->dst_port_mask) {
3924 filter_info->dst_port_mask = 0;
3925 filter_info->dst_port = filter->dst_port;
3928 filter_info->dst_port_mask = 1;
3931 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
3935 switch (filter->src_port_mask) {
3937 filter_info->src_port_mask = 0;
3938 filter_info->src_port = filter->src_port;
3941 filter_info->src_port_mask = 1;
3944 PMD_DRV_LOG(ERR, "invalid src_port mask.");
3948 switch (filter->proto_mask) {
3950 filter_info->proto_mask = 0;
3951 filter_info->proto = filter->proto;
3954 filter_info->proto_mask = 1;
3957 PMD_DRV_LOG(ERR, "invalid protocol mask.");
3961 filter_info->priority = (uint8_t)filter->priority;
3962 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
3963 filter_info->tcp_flags = filter->tcp_flags;
3965 filter_info->tcp_flags = 0;
3970 static inline struct e1000_5tuple_filter *
3971 igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list,
3972 struct e1000_5tuple_filter_info *key)
3974 struct e1000_5tuple_filter *it;
3976 TAILQ_FOREACH(it, filter_list, entries) {
3977 if (memcmp(key, &it->filter_info,
3978 sizeof(struct e1000_5tuple_filter_info)) == 0) {
3986 * igb_add_5tuple_filter_82576 - add a 5tuple filter
3989 * dev: Pointer to struct rte_eth_dev.
3990 * ntuple_filter: pointer to the filter that will be added.
3993 * - On success, zero.
3994 * - On failure, a negative value.
3997 igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
3998 struct rte_eth_ntuple_filter *ntuple_filter)
4000 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4001 struct e1000_filter_info *filter_info =
4002 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4003 struct e1000_5tuple_filter *filter;
4004 uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK;
4005 uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP;
4009 filter = rte_zmalloc("e1000_5tuple_filter",
4010 sizeof(struct e1000_5tuple_filter), 0);
4014 ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
4015 &filter->filter_info);
4021 if (igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
4022 &filter->filter_info) != NULL) {
4023 PMD_DRV_LOG(ERR, "filter exists.");
4027 filter->queue = ntuple_filter->queue;
4030 * look for an unused 5tuple filter index,
4031 * and insert the filter into the list.
4033 for (i = 0; i < E1000_MAX_FTQF_FILTERS; i++) {
4034 if (!(filter_info->fivetuple_mask & (1 << i))) {
4035 filter_info->fivetuple_mask |= 1 << i;
4037 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
4043 if (i >= E1000_MAX_FTQF_FILTERS) {
4044 PMD_DRV_LOG(ERR, "5tuple filters are full.");
4049 ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK;
4050 if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. */
4051 ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP;
4052 if (filter->filter_info.dst_ip_mask == 0)
4053 ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP;
4054 if (filter->filter_info.src_port_mask == 0)
4055 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
4056 if (filter->filter_info.proto_mask == 0)
4057 ftqf &= ~E1000_FTQF_MASK_PROTO_BP;
4058 ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) &
4059 E1000_FTQF_QUEUE_MASK;
4060 ftqf |= E1000_FTQF_QUEUE_ENABLE;
4061 E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf);
4062 E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip);
4063 E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip);
4065 spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT;
4066 E1000_WRITE_REG(hw, E1000_SPQF(i), spqf);
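/*
 * 5-tuple filter register layout: FTQF holds the protocol, the per-field
 * mask-disable bits, the target queue and the enable bit; DAQF/SAQF hold
 * the destination and source IPv4 addresses; SPQF holds the source port.
 * The destination port and priority are programmed into IMIR below.
 */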
4068 imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
4069 if (filter->filter_info.dst_port_mask == 1) /* 1b means do not compare. */
4070 imir |= E1000_IMIR_PORT_BP;
4072 imir &= ~E1000_IMIR_PORT_BP;
4073 imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
4075 /* tcp flags bits setting. */
4076 if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
4077 if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
4078 imir_ext |= E1000_IMIREXT_CTRL_URG;
4079 if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
4080 imir_ext |= E1000_IMIREXT_CTRL_ACK;
4081 if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
4082 imir_ext |= E1000_IMIREXT_CTRL_PSH;
4083 if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
4084 imir_ext |= E1000_IMIREXT_CTRL_RST;
4085 if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
4086 imir_ext |= E1000_IMIREXT_CTRL_SYN;
4087 if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
4088 imir_ext |= E1000_IMIREXT_CTRL_FIN;
4090 imir_ext |= E1000_IMIREXT_CTRL_BP;
4091 E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
4092 E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
4097 * igb_remove_5tuple_filter_82576 - remove a 5tuple filter
4100 * dev: Pointer to struct rte_eth_dev.
4101 * ntuple_filter: pointer to the filter that will be removed.
4104 * - On success, zero.
4105 * - On failure, a negative value.
4108 igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
4109 struct rte_eth_ntuple_filter *ntuple_filter)
4111 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4112 struct e1000_filter_info *filter_info =
4113 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4114 struct e1000_5tuple_filter_info filter_5tuple;
4115 struct e1000_5tuple_filter *filter;
4118 memset(&filter_5tuple, 0, sizeof(struct e1000_5tuple_filter_info));
4119 ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
4124 filter = igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
4126 if (filter == NULL) {
4127 PMD_DRV_LOG(ERR, "filter doesn't exist.");
4131 filter_info->fivetuple_mask &= ~(1 << filter->index);
4132 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
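/*
 * Disable the hardware entry: restore FTQF to its masked default and clear
 * the address, port and IMIR registers for this filter index.
 */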
4135 E1000_WRITE_REG(hw, E1000_FTQF(filter->index),
4136 E1000_FTQF_VF_BP | E1000_FTQF_MASK);
4137 E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0);
4138 E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0);
4139 E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0);
4140 E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
4141 E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
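/*
 * Set the device MTU: validate the resulting frame size against the port's
 * max_rx_pktlen, switch jumbo-frame mode (RCTL.LPE) on or off as needed and
 * program RLPML with the new maximum frame length.
 */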
4146 eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
4149 struct e1000_hw *hw;
4150 struct rte_eth_dev_info dev_info;
4151 uint32_t frame_size = mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN +
4154 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4156 #ifdef RTE_LIBRTE_82571_SUPPORT
4157 /* XXX: not bigger than max_rx_pktlen */
4158 if (hw->mac.type == e1000_82571)
4161 eth_igb_infos_get(dev, &dev_info);
4163 /* check that mtu is within the allowed range */
4164 if ((mtu < ETHER_MIN_MTU) ||
4165 (frame_size > dev_info.max_rx_pktlen))
4168 /* Refuse an MTU that requires scattered packet support when that
4169 * feature has not been enabled beforehand. */
4170 if (!dev->data->scattered_rx &&
4171 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
4174 rctl = E1000_READ_REG(hw, E1000_RCTL);
4176 /* switch to jumbo mode if needed */
4177 if (frame_size > ETHER_MAX_LEN) {
4178 dev->data->dev_conf.rxmode.jumbo_frame = 1;
4179 rctl |= E1000_RCTL_LPE;
4181 dev->data->dev_conf.rxmode.jumbo_frame = 0;
4182 rctl &= ~E1000_RCTL_LPE;
4184 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
4186 /* update max frame size */
4187 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
4189 E1000_WRITE_REG(hw, E1000_RLPML,
4190 dev->data->dev_conf.rxmode.max_rx_pkt_len);
4196 * igb_add_del_ntuple_filter - add or delete an ntuple filter
4199 * dev: Pointer to struct rte_eth_dev.
4200 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
4201 * add: if true, add filter, if false, remove filter
4204 * - On success, zero.
4205 * - On failure, a negative value.
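 *
 * Illustrative usage (a sketch, not taken from this driver): applications
 * normally reach this function through the generic filter API; port_id and
 * the literal field values below are placeholders. Roughly:
 *
 *   struct rte_eth_ntuple_filter f = {
 *       .flags = RTE_5TUPLE_FLAGS,
 *       .proto = IPPROTO_TCP,
 *       .proto_mask = UINT8_MAX,    a full mask means "compare this field"
 *       .queue = 1,
 *   };
 *   rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
 *                           RTE_ETH_FILTER_ADD, &f);
 *
 * Field names follow struct rte_eth_ntuple_filter (rte_eth_ctrl.h); masks
 * left at zero mean "do not compare", as handled by
 * ntuple_filter_to_5tuple_82576().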
4208 igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
4209 struct rte_eth_ntuple_filter *ntuple_filter,
4212 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4215 switch (ntuple_filter->flags) {
4216 case RTE_5TUPLE_FLAGS:
4217 case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
4218 if (hw->mac.type != e1000_82576)
4221 ret = igb_add_5tuple_filter_82576(dev,
4224 ret = igb_remove_5tuple_filter_82576(dev,
4227 case RTE_2TUPLE_FLAGS:
4228 case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
4229 if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
4232 ret = igb_add_2tuple_filter(dev, ntuple_filter);
4234 ret = igb_remove_2tuple_filter(dev, ntuple_filter);
4245 * igb_get_ntuple_filter - get an ntuple filter
4248 * dev: Pointer to struct rte_eth_dev.
4249 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
4252 * - On success, zero.
4253 * - On failure, a negative value.
4256 igb_get_ntuple_filter(struct rte_eth_dev *dev,
4257 struct rte_eth_ntuple_filter *ntuple_filter)
4259 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4260 struct e1000_filter_info *filter_info =
4261 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4262 struct e1000_5tuple_filter_info filter_5tuple;
4263 struct e1000_2tuple_filter_info filter_2tuple;
4264 struct e1000_5tuple_filter *p_5tuple_filter;
4265 struct e1000_2tuple_filter *p_2tuple_filter;
4268 switch (ntuple_filter->flags) {
4269 case RTE_5TUPLE_FLAGS:
4270 case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
4271 if (hw->mac.type != e1000_82576)
4273 memset(&filter_5tuple,
4275 sizeof(struct e1000_5tuple_filter_info));
4276 ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
4280 p_5tuple_filter = igb_5tuple_filter_lookup_82576(
4281 &filter_info->fivetuple_list,
4283 if (p_5tuple_filter == NULL) {
4284 PMD_DRV_LOG(ERR, "filter doesn't exist.");
4287 ntuple_filter->queue = p_5tuple_filter->queue;
4289 case RTE_2TUPLE_FLAGS:
4290 case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
4291 if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
4293 memset(&filter_2tuple,
4295 sizeof(struct e1000_2tuple_filter_info));
4296 ret = ntuple_filter_to_2tuple(ntuple_filter, &filter_2tuple);
4299 p_2tuple_filter = igb_2tuple_filter_lookup(
4300 &filter_info->twotuple_list,
4302 if (p_2tuple_filter == NULL) {
4303 PMD_DRV_LOG(ERR, "filter doesn't exist.");
4306 ntuple_filter->queue = p_2tuple_filter->queue;
4317 * igb_ntuple_filter_handle - Handle operations for ntuple filter.
4318 * @dev: pointer to rte_eth_dev structure
4319 * @filter_op: operation to be taken.
4320 * @arg: a pointer to specific structure corresponding to the filter_op
4323 igb_ntuple_filter_handle(struct rte_eth_dev *dev,
4324 enum rte_filter_op filter_op,
4327 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4330 MAC_TYPE_FILTER_SUP(hw->mac.type);
4332 if (filter_op == RTE_ETH_FILTER_NOP)
4336 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
4341 switch (filter_op) {
4342 case RTE_ETH_FILTER_ADD:
4343 ret = igb_add_del_ntuple_filter(dev,
4344 (struct rte_eth_ntuple_filter *)arg,
4347 case RTE_ETH_FILTER_DELETE:
4348 ret = igb_add_del_ntuple_filter(dev,
4349 (struct rte_eth_ntuple_filter *)arg,
4352 case RTE_ETH_FILTER_GET:
4353 ret = igb_get_ntuple_filter(dev,
4354 (struct rte_eth_ntuple_filter *)arg);
4357 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
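/*
 * Scan the driver's software copy of the ETQF table for an enabled entry
 * matching the given ethertype.
 */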
4365 igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info,
4370 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
4371 if (filter_info->ethertype_filters[i] == ethertype &&
4372 (filter_info->ethertype_mask & (1 << i)))
4379 igb_ethertype_filter_insert(struct e1000_filter_info *filter_info,
4384 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
4385 if (!(filter_info->ethertype_mask & (1 << i))) {
4386 filter_info->ethertype_mask |= 1 << i;
4387 filter_info->ethertype_filters[i] = ethertype;
4395 igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
4398 if (idx >= E1000_MAX_ETQF_FILTERS)
4400 filter_info->ethertype_mask &= ~(1 << idx);
4401 filter_info->ethertype_filters[idx] = 0;
4407 igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
4408 struct rte_eth_ethertype_filter *filter,
4411 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4412 struct e1000_filter_info *filter_info =
4413 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4417 if (filter->ether_type == ETHER_TYPE_IPv4 ||
4418 filter->ether_type == ETHER_TYPE_IPv6) {
4419 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
4420 " ethertype filter.", filter->ether_type);
4424 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
4425 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
4428 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
4429 PMD_DRV_LOG(ERR, "drop option is unsupported.");
4433 ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
4434 if (ret >= 0 && add) {
4435 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
4436 filter->ether_type);
4439 if (ret < 0 && !add) {
4440 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
4441 filter->ether_type);
4446 ret = igb_ethertype_filter_insert(filter_info,
4447 filter->ether_type);
4449 PMD_DRV_LOG(ERR, "ethertype filters are full.");
4453 etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
4454 etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE);
4455 etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT;
4457 ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret);
4461 E1000_WRITE_REG(hw, E1000_ETQF(ret), etqf);
4462 E1000_WRITE_FLUSH(hw);
4468 igb_get_ethertype_filter(struct rte_eth_dev *dev,
4469 struct rte_eth_ethertype_filter *filter)
4471 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4472 struct e1000_filter_info *filter_info =
4473 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4477 ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
4479 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
4480 filter->ether_type);
4484 etqf = E1000_READ_REG(hw, E1000_ETQF(ret));
4485 if (etqf & E1000_ETQF_FILTER_ENABLE) {
4486 filter->ether_type = etqf & E1000_ETQF_ETHERTYPE;
4488 filter->queue = (etqf & E1000_ETQF_QUEUE) >>
4489 E1000_ETQF_QUEUE_SHIFT;
4497 * igb_ethertype_filter_handle - Handle operations for ethertype filter.
4498 * @dev: pointer to rte_eth_dev structure
4499 * @filter_op: operation to be taken.
4500 * @arg: a pointer to specific structure corresponding to the filter_op
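 *
 * Illustrative usage (a sketch, not taken from this driver; port_id and the
 * field values are placeholders): through the generic filter API this looks
 * roughly like
 *
 *   struct rte_eth_ethertype_filter ef = {
 *       .ether_type = 0x88F7,    for example, the PTP-over-Ethernet ethertype
 *       .queue = 2,
 *   };
 *   rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
 *                           RTE_ETH_FILTER_ADD, &ef);
 *
 * IPv4/IPv6 ethertypes, MAC compare and the drop flag are rejected by
 * igb_add_del_ethertype_filter().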
4503 igb_ethertype_filter_handle(struct rte_eth_dev *dev,
4504 enum rte_filter_op filter_op,
4507 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4510 MAC_TYPE_FILTER_SUP(hw->mac.type);
4512 if (filter_op == RTE_ETH_FILTER_NOP)
4516 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
4521 switch (filter_op) {
4522 case RTE_ETH_FILTER_ADD:
4523 ret = igb_add_del_ethertype_filter(dev,
4524 (struct rte_eth_ethertype_filter *)arg,
4527 case RTE_ETH_FILTER_DELETE:
4528 ret = igb_add_del_ethertype_filter(dev,
4529 (struct rte_eth_ethertype_filter *)arg,
4532 case RTE_ETH_FILTER_GET:
4533 ret = igb_get_ethertype_filter(dev,
4534 (struct rte_eth_ethertype_filter *)arg);
4537 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
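/*
 * Top-level filter_ctrl entry point: dispatch the request to the ntuple,
 * ethertype, SYN or flex filter handler according to filter_type.
 */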
4545 eth_igb_filter_ctrl(struct rte_eth_dev *dev,
4546 enum rte_filter_type filter_type,
4547 enum rte_filter_op filter_op,
4552 switch (filter_type) {
4553 case RTE_ETH_FILTER_NTUPLE:
4554 ret = igb_ntuple_filter_handle(dev, filter_op, arg);
4556 case RTE_ETH_FILTER_ETHERTYPE:
4557 ret = igb_ethertype_filter_handle(dev, filter_op, arg);
4559 case RTE_ETH_FILTER_SYN:
4560 ret = eth_igb_syn_filter_handle(dev, filter_op, arg);
4562 case RTE_ETH_FILTER_FLEXIBLE:
4563 ret = eth_igb_flex_filter_handle(dev, filter_op, arg);
4566 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
4575 eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
4576 struct ether_addr *mc_addr_set,
4577 uint32_t nb_mc_addr)
4579 struct e1000_hw *hw;
4581 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4582 e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
4587 igb_read_systime_cyclecounter(struct rte_eth_dev *dev)
4589 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4590 uint64_t systime_cycles;
4592 switch (hw->mac.type) {
4596 * Need to read System Time Residue Register to be able
4597 * to read the other two registers.
4599 E1000_READ_REG(hw, E1000_SYSTIMR);
4600 /* SYSTIML stores ns and SYSTIMH stores seconds. */
4601 systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
4602 systime_cycles += (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH)
4609 * Need to read System Time Residue Register to be able
4610 * to read the other two registers.
4612 E1000_READ_REG(hw, E1000_SYSTIMR);
4613 systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
4614 /* Only the 8 LSB are valid. */
4615 systime_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_SYSTIMH)
4619 systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
4620 systime_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH)
4625 return systime_cycles;
4629 igb_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4631 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4632 uint64_t rx_tstamp_cycles;
4634 switch (hw->mac.type) {
4637 /* RXSTMPL stores ns and RXSTMPH stores seconds. */
4638 rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
4639 rx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH)
4645 rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
4646 /* Only the 8 LSB are valid. */
4647 rx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_RXSTMPH)
4651 rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
4652 rx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH)
4657 return rx_tstamp_cycles;
4661 igb_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4663 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4664 uint64_t tx_tstamp_cycles;
4666 switch (hw->mac.type) {
4669 /* TXSTMPL stores ns and TXSTMPH stores seconds. */
4670 tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
4671 tx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH)
4677 tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
4678 /* Only the 8 LSB are valid. */
4679 tx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_TXSTMPH)
4683 tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
4684 tx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH)
4689 return tx_tstamp_cycles;
4693 igb_start_timecounters(struct rte_eth_dev *dev)
4695 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4696 struct e1000_adapter *adapter =
4697 (struct e1000_adapter *)dev->data->dev_private;
4698 uint32_t incval = 1;
4700 uint64_t mask = E1000_CYCLECOUNTER_MASK;
4702 switch (hw->mac.type) {
4706 /* 32 LSB bits + 8 MSB bits = 40 bits */
4707 mask = (1ULL << 40) - 1;
4712 * Start incrementing the register
4713 * used to timestamp PTP packets.
4715 E1000_WRITE_REG(hw, E1000_TIMINCA, incval);
4718 incval = E1000_INCVALUE_82576;
4719 shift = IGB_82576_TSYNC_SHIFT;
4720 E1000_WRITE_REG(hw, E1000_TIMINCA,
4721 E1000_INCPERIOD_82576 | incval);
4728 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
4729 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
4730 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
4732 adapter->systime_tc.cc_mask = mask;
4733 adapter->systime_tc.cc_shift = shift;
4734 adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
4736 adapter->rx_tstamp_tc.cc_mask = mask;
4737 adapter->rx_tstamp_tc.cc_shift = shift;
4738 adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
4740 adapter->tx_tstamp_tc.cc_mask = mask;
4741 adapter->tx_tstamp_tc.cc_shift = shift;
4742 adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
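/*
 * The three timecounters (system time, RX timestamp, TX timestamp) share the
 * same mask and shift; rte_timecounter_update() later extends the raw,
 * wrapping hardware readings into a continuous nanosecond count.
 */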
4746 igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
4748 struct e1000_adapter *adapter =
4749 (struct e1000_adapter *)dev->data->dev_private;
4751 adapter->systime_tc.nsec += delta;
4752 adapter->rx_tstamp_tc.nsec += delta;
4753 adapter->tx_tstamp_tc.nsec += delta;
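/*
 * Only the software timecounters are offset here; the hardware SYSTIM
 * registers themselves keep running unchanged.
 */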
4759 igb_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
4762 struct e1000_adapter *adapter =
4763 (struct e1000_adapter *)dev->data->dev_private;
4765 ns = rte_timespec_to_ns(ts);
4767 /* Set the timecounters to a new value. */
4768 adapter->systime_tc.nsec = ns;
4769 adapter->rx_tstamp_tc.nsec = ns;
4770 adapter->tx_tstamp_tc.nsec = ns;
4776 igb_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
4778 uint64_t ns, systime_cycles;
4779 struct e1000_adapter *adapter =
4780 (struct e1000_adapter *)dev->data->dev_private;
4782 systime_cycles = igb_read_systime_cyclecounter(dev);
4783 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
4784 *ts = rte_ns_to_timespec(ns);
4790 igb_timesync_enable(struct rte_eth_dev *dev)
4792 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4796 /* Stop the timesync system time. */
4797 E1000_WRITE_REG(hw, E1000_TIMINCA, 0x0);
4798 /* Reset the timesync system time value. */
4799 switch (hw->mac.type) {
4805 E1000_WRITE_REG(hw, E1000_SYSTIMR, 0x0);
4808 E1000_WRITE_REG(hw, E1000_SYSTIML, 0x0);
4809 E1000_WRITE_REG(hw, E1000_SYSTIMH, 0x0);
4812 /* Not supported. */
4816 /* Enable system time, since it isn't on by default. */
4817 tsauxc = E1000_READ_REG(hw, E1000_TSAUXC);
4818 tsauxc &= ~E1000_TSAUXC_DISABLE_SYSTIME;
4819 E1000_WRITE_REG(hw, E1000_TSAUXC, tsauxc);
4821 igb_start_timecounters(dev);
4823 /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
4824 E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588),
4826 E1000_ETQF_FILTER_ENABLE |
4829 /* Enable timestamping of received PTP packets. */
4830 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
4831 tsync_ctl |= E1000_TSYNCRXCTL_ENABLED;
4832 E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);
4834 /* Enable timestamping of transmitted PTP packets. */
4835 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
4836 tsync_ctl |= E1000_TSYNCTXCTL_ENABLED;
4837 E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);
4843 igb_timesync_disable(struct rte_eth_dev *dev)
4845 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4848 /* Disable timestamping of transmitted PTP packets. */
4849 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
4850 tsync_ctl &= ~E1000_TSYNCTXCTL_ENABLED;
4851 E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);
4853 /* Disable timestamping of received PTP packets. */
4854 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
4855 tsync_ctl &= ~E1000_TSYNCRXCTL_ENABLED;
4856 E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);
4858 /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
4859 E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), 0);
4861 /* Stop incrementing the System Time registers. */
4862 E1000_WRITE_REG(hw, E1000_TIMINCA, 0);
4868 igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
4869 struct timespec *timestamp,
4870 uint32_t flags __rte_unused)
4872 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4873 struct e1000_adapter *adapter =
4874 (struct e1000_adapter *)dev->data->dev_private;
4875 uint32_t tsync_rxctl;
4876 uint64_t rx_tstamp_cycles;
4879 tsync_rxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
4880 if ((tsync_rxctl & E1000_TSYNCRXCTL_VALID) == 0)
4883 rx_tstamp_cycles = igb_read_rx_tstamp_cyclecounter(dev);
4884 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
4885 *timestamp = rte_ns_to_timespec(ns);
4891 igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
4892 struct timespec *timestamp)
4894 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4895 struct e1000_adapter *adapter =
4896 (struct e1000_adapter *)dev->data->dev_private;
4897 uint32_t tsync_txctl;
4898 uint64_t tx_tstamp_cycles;
4901 tsync_txctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
4902 if ((tsync_txctl & E1000_TSYNCTXCTL_VALID) == 0)
4905 tx_tstamp_cycles = igb_read_tx_tstamp_cyclecounter(dev);
4906 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
4907 *timestamp = rte_ns_to_timespec(ns);
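/*
 * Illustrative application-side sequence (a sketch, not part of this driver;
 * port_id and delta_ns are placeholders): the timesync callbacks above back
 * the generic ethdev API, roughly:
 *
 *   struct timespec ts;
 *   rte_eth_timesync_enable(port_id);
 *   ...a PTP frame is received on the port...
 *   if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
 *       printf("RX timestamp: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
 *   rte_eth_timesync_adjust_time(port_id, delta_ns);
 */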
4913 eth_igb_get_reg_length(struct rte_eth_dev *dev __rte_unused)
4917 const struct reg_info *reg_group;
4919 while ((reg_group = igb_regs[g_ind++]))
4920 count += igb_reg_group_count(reg_group);
4926 igbvf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
4930 const struct reg_info *reg_group;
4932 while ((reg_group = igbvf_regs[g_ind++]))
4933 count += igb_reg_group_count(reg_group);
4939 eth_igb_get_regs(struct rte_eth_dev *dev,
4940 struct rte_dev_reg_info *regs)
4942 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4943 uint32_t *data = regs->data;
4946 const struct reg_info *reg_group;
4948 /* Support only full register dump */
4949 if ((regs->length == 0) ||
4950 (regs->length == (uint32_t)eth_igb_get_reg_length(dev))) {
4951 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
4953 while ((reg_group = igb_regs[g_ind++]))
4954 count += igb_read_regs_group(dev, &data[count],
4963 igbvf_get_regs(struct rte_eth_dev *dev,
4964 struct rte_dev_reg_info *regs)
4966 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4967 uint32_t *data = regs->data;
4970 const struct reg_info *reg_group;
4972 /* Support only full register dump */
4973 if ((regs->length == 0) ||
4974 (regs->length == (uint32_t)igbvf_get_reg_length(dev))) {
4975 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
4977 while ((reg_group = igbvf_regs[g_ind++]))
4978 count += igb_read_regs_group(dev, &data[count],
4987 eth_igb_get_eeprom_length(struct rte_eth_dev *dev)
4989 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4991 /* Return unit is byte count */
4992 return hw->nvm.word_size * 2;
4996 eth_igb_get_eeprom(struct rte_eth_dev *dev,
4997 struct rte_dev_eeprom_info *in_eeprom)
4999 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5000 struct e1000_nvm_info *nvm = &hw->nvm;
5001 uint16_t *data = in_eeprom->data;
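/* The NVM is word (16-bit) addressed: convert the requested byte offset and
 * length into word units before range checking.
 */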
5004 first = in_eeprom->offset >> 1;
5005 length = in_eeprom->length >> 1;
5006 if ((first >= hw->nvm.word_size) ||
5007 ((first + length) >= hw->nvm.word_size))
5010 in_eeprom->magic = hw->vendor_id |
5011 ((uint32_t)hw->device_id << 16);
5013 if ((nvm->ops.read) == NULL)
5016 return nvm->ops.read(hw, first, length, data);
5020 eth_igb_set_eeprom(struct rte_eth_dev *dev,
5021 struct rte_dev_eeprom_info *in_eeprom)
5023 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5024 struct e1000_nvm_info *nvm = &hw->nvm;
5025 uint16_t *data = in_eeprom->data;
5028 first = in_eeprom->offset >> 1;
5029 length = in_eeprom->length >> 1;
5030 if ((first >= hw->nvm.word_size) ||
5031 ((first + length) >= hw->nvm.word_size))
5034 in_eeprom->magic = (uint32_t)hw->vendor_id |
5035 ((uint32_t)hw->device_id << 16);
5037 if ((nvm->ops.write) == NULL)
5039 return nvm->ops.write(hw, first, length, data);
5042 static struct rte_driver pmd_igb_drv = {
5044 .init = rte_igb_pmd_init,
5047 static struct rte_driver pmd_igbvf_drv = {
5049 .init = rte_igbvf_pmd_init,
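/*
 * Per-queue RX interrupt masking: writing the queue's bit to EIMC disables
 * (masks) the interrupt, writing it to EIMS re-enables it.
 */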
5053 eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
5055 struct e1000_hw *hw =
5056 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5057 uint32_t mask = 1 << queue_id;
5059 E1000_WRITE_REG(hw, E1000_EIMC, mask);
5060 E1000_WRITE_FLUSH(hw);
5066 eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
5068 struct e1000_hw *hw =
5069 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5070 uint32_t mask = 1 << queue_id;
5073 regval = E1000_READ_REG(hw, E1000_EIMS);
5074 E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
5075 E1000_WRITE_FLUSH(hw);
5077 rte_intr_enable(&dev->pci_dev->intr_handle);
5083 eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
5084 uint8_t index, uint8_t offset)
5086 uint32_t val = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
5089 val &= ~((uint32_t)0xFF << offset);
5091 /* write vector and valid bit */
5092 val |= (msix_vector | E1000_IVAR_VALID) << offset;
5094 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, val);
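/*
 * Map an RX (direction 0) or TX (direction 1) queue to an MSI-X vector.
 * 82575 uses the MSIXBM bitmap registers; 82576 and later program the IVAR
 * table through eth_igb_write_ivar() with a MAC-specific index/offset layout.
 */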
5098 eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
5099 uint8_t queue, uint8_t msix_vector)
5103 if (hw->mac.type == e1000_82575) {
5105 tmp = E1000_EICR_RX_QUEUE0 << queue;
5106 else if (direction == 1)
5107 tmp = E1000_EICR_TX_QUEUE0 << queue;
5108 E1000_WRITE_REG(hw, E1000_MSIXBM(msix_vector), tmp);
5109 } else if (hw->mac.type == e1000_82576) {
5110 if ((direction == 0) || (direction == 1))
5111 eth_igb_write_ivar(hw, msix_vector, queue & 0x7,
5112 ((queue & 0x8) << 1) +
5114 } else if ((hw->mac.type == e1000_82580) ||
5115 (hw->mac.type == e1000_i350) ||
5116 (hw->mac.type == e1000_i354) ||
5117 (hw->mac.type == e1000_i210) ||
5118 (hw->mac.type == e1000_i211)) {
5119 if ((direction == 0) || (direction == 1))
5120 eth_igb_write_ivar(hw, msix_vector,
5122 ((queue & 0x1) << 4) +
5127 /* Sets up the hardware to generate MSI-X interrupts properly.
5129 * @dev: board private structure
5132 eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
5135 uint32_t tmpval, regval, intr_mask;
5136 struct e1000_hw *hw =
5137 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5138 uint32_t vec = E1000_MISC_VEC_ID;
5139 uint32_t base = E1000_MISC_VEC_ID;
5140 uint32_t misc_shift = 0;
5142 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
5144 /* Don't configure the MSI-X registers if no mapping has been done
5145 * between interrupt vectors and event fds.
5147 if (!rte_intr_dp_is_en(intr_handle))
5150 if (rte_intr_allow_others(intr_handle)) {
5151 vec = base = E1000_RX_VEC_START;
5155 /* set interrupt vector for other causes */
5156 if (hw->mac.type == e1000_82575) {
5157 tmpval = E1000_READ_REG(hw, E1000_CTRL_EXT);
5158 /* enable MSI-X PBA support */
5159 tmpval |= E1000_CTRL_EXT_PBA_CLR;
5161 /* Auto-Mask interrupts upon ICR read */
5162 tmpval |= E1000_CTRL_EXT_EIAME;
5163 tmpval |= E1000_CTRL_EXT_IRCA;
5165 E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmpval);
5167 /* enable msix_other interrupt */
5168 E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), 0, E1000_EIMS_OTHER);
5169 regval = E1000_READ_REG(hw, E1000_EIAC);
5170 E1000_WRITE_REG(hw, E1000_EIAC, regval | E1000_EIMS_OTHER);
5171 regval = E1000_READ_REG(hw, E1000_EIAM);
5172 E1000_WRITE_REG(hw, E1000_EIMS, regval | E1000_EIMS_OTHER);
5173 } else if ((hw->mac.type == e1000_82576) ||
5174 (hw->mac.type == e1000_82580) ||
5175 (hw->mac.type == e1000_i350) ||
5176 (hw->mac.type == e1000_i354) ||
5177 (hw->mac.type == e1000_i210) ||
5178 (hw->mac.type == e1000_i211)) {
5179 /* turn on MSI-X capability first */
5180 E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE |
5181 E1000_GPIE_PBA | E1000_GPIE_EIAME |
5183 intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
5185 regval = E1000_READ_REG(hw, E1000_EIAC);
5186 E1000_WRITE_REG(hw, E1000_EIAC, regval | intr_mask);
5188 /* enable msix_other interrupt */
5189 regval = E1000_READ_REG(hw, E1000_EIMS);
5190 E1000_WRITE_REG(hw, E1000_EIMS, regval | intr_mask);
5191 tmpval = (dev->data->nb_rx_queues | E1000_IVAR_VALID) << 8;
5192 E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmpval);
5195 /* Use EIAM to auto-mask when an MSI-X interrupt
5196 * is asserted; this saves a register write for every interrupt.
5198 intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
5200 regval = E1000_READ_REG(hw, E1000_EIAM);
5201 E1000_WRITE_REG(hw, E1000_EIAM, regval | intr_mask);
5203 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
5204 eth_igb_assign_msix_vector(hw, 0, queue_id, vec);
5205 intr_handle->intr_vec[queue_id] = vec;
5206 if (vec < base + intr_handle->nb_efd - 1)
5210 E1000_WRITE_FLUSH(hw);
5213 PMD_REGISTER_DRIVER(pmd_igb_drv);
5214 PMD_REGISTER_DRIVER(pmd_igbvf_drv);