/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_dev.h>

#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"
#include "igb_regs.h"
/*
 * Default values for port configuration
 */
#define IGB_DEFAULT_RX_FREE_THRESH  32

#define IGB_DEFAULT_RX_PTHRESH      8
#define IGB_DEFAULT_RX_HTHRESH      8
#define IGB_DEFAULT_RX_WTHRESH      0

#define IGB_DEFAULT_TX_PTHRESH      32
#define IGB_DEFAULT_TX_HTHRESH      0
#define IGB_DEFAULT_TX_WTHRESH      0

#define IGB_HKEY_MAX_INDEX          10
/* Bit shift and mask */
#define IGB_4_BIT_WIDTH  (CHAR_BIT / 2)
#define IGB_4_BIT_MASK   RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
#define IGB_8_BIT_WIDTH  CHAR_BIT
#define IGB_8_BIT_MASK   UINT8_MAX
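
/*
 * For reference, RTE_LEN2MASK(n, t) builds a mask of the n lowest bits in
 * type t, so with CHAR_BIT == 8 the constants above evaluate to:
 *
 *	IGB_4_BIT_WIDTH == 4, IGB_4_BIT_MASK == 0x0f
 *	IGB_8_BIT_WIDTH == 8, IGB_8_BIT_MASK == 0xff
 *
 * The RSS redirection table code uses the 4-bit variants to address the
 * four 8-bit entries packed into each 32-bit RETA register.
 */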
/* Additional timesync values. */
#define E1000_CYCLECOUNTER_MASK      0xffffffffffffffffULL
#define E1000_ETQF_FILTER_1588       3
#define IGB_82576_TSYNC_SHIFT        16
#define E1000_INCPERIOD_82576        (1 << E1000_TIMINCA_16NS_SHIFT)
#define E1000_INCVALUE_82576         (16 << IGB_82576_TSYNC_SHIFT)
#define E1000_TSAUXC_DISABLE_SYSTIME 0x80000000
static int  eth_igb_configure(struct rte_eth_dev *dev);
static int  eth_igb_start(struct rte_eth_dev *dev);
static void eth_igb_stop(struct rte_eth_dev *dev);
static void eth_igb_close(struct rte_eth_dev *dev);
static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
static void eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
static int  eth_igb_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static void eth_igb_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *rte_stats);
static int eth_igb_xstats_get(struct rte_eth_dev *dev,
			      struct rte_eth_xstats *xstats, unsigned n);
static void eth_igb_stats_reset(struct rte_eth_dev *dev);
static void eth_igb_xstats_reset(struct rte_eth_dev *dev);
static void eth_igb_infos_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
static void eth_igbvf_infos_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);
static int  eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int  eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int  eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
static int  eth_igb_interrupt_action(struct rte_eth_dev *dev);
static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
				      void *param);
static int  igb_hardware_init(struct e1000_hw *hw);
static void igb_hw_control_acquire(struct e1000_hw *hw);
static void igb_hw_control_release(struct e1000_hw *hw);
static void igb_init_manageability(struct e1000_hw *hw);
static void igb_release_manageability(struct e1000_hw *hw);

static int  eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static int eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
				 enum rte_vlan_type vlan_type,
				 uint16_t tpid_id);
static void eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int eth_igb_led_on(struct rte_eth_dev *dev);
static int eth_igb_led_off(struct rte_eth_dev *dev);

static void igb_intr_disable(struct e1000_hw *hw);
static int  igb_get_rx_buffer_size(struct e1000_hw *hw);
static void eth_igb_rar_set(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr,
		uint32_t index, uint32_t pool);
static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
static void eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
		struct ether_addr *addr);

static void igbvf_intr_disable(struct e1000_hw *hw);
static int igbvf_dev_configure(struct rte_eth_dev *dev);
static int igbvf_dev_start(struct rte_eth_dev *dev);
static void igbvf_dev_stop(struct rte_eth_dev *dev);
static void igbvf_dev_close(struct rte_eth_dev *dev);
static int eth_igbvf_link_update(struct e1000_hw *hw);
static void eth_igbvf_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *rte_stats);
static int eth_igbvf_xstats_get(struct rte_eth_dev *dev,
				struct rte_eth_xstats *xstats, unsigned n);
static void eth_igbvf_stats_reset(struct rte_eth_dev *dev);
static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static void igbvf_default_mac_addr_set(struct rte_eth_dev *dev,
		struct ether_addr *addr);
static int igbvf_get_reg_length(struct rte_eth_dev *dev);
static int igbvf_get_regs(struct rte_eth_dev *dev,
		struct rte_dev_reg_info *regs);

static int eth_igb_rss_reta_update(struct rte_eth_dev *dev,
				   struct rte_eth_rss_reta_entry64 *reta_conf,
				   uint16_t reta_size);
static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
				  struct rte_eth_rss_reta_entry64 *reta_conf,
				  uint16_t reta_size);

static int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
			struct rte_eth_syn_filter *filter,
			bool add);
static int eth_igb_syn_filter_get(struct rte_eth_dev *dev,
			struct rte_eth_syn_filter *filter);
static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int igb_add_2tuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_2tuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
			struct rte_eth_flex_filter *filter,
			bool add);
static int eth_igb_get_flex_filter(struct rte_eth_dev *dev,
			struct rte_eth_flex_filter *filter);
static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *filter,
			bool add);
static int igb_get_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *filter);
static int igb_ntuple_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter,
			bool add);
static int igb_ethertype_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int igb_get_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter);
static int eth_igb_filter_ctrl(struct rte_eth_dev *dev,
			enum rte_filter_type filter_type,
			enum rte_filter_op filter_op,
			void *arg);
static int eth_igb_get_reg_length(struct rte_eth_dev *dev);
static int eth_igb_get_regs(struct rte_eth_dev *dev,
		struct rte_dev_reg_info *regs);
static int eth_igb_get_eeprom_length(struct rte_eth_dev *dev);
static int eth_igb_get_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *eeprom);
static int eth_igb_set_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *eeprom);
static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
				    struct ether_addr *mc_addr_set,
				    uint32_t nb_mc_addr);
static int igb_timesync_enable(struct rte_eth_dev *dev);
static int igb_timesync_disable(struct rte_eth_dev *dev);
static int igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
					  struct timespec *timestamp,
					  uint32_t flags);
static int igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					  struct timespec *timestamp);
static int igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
static int igb_timesync_read_time(struct rte_eth_dev *dev,
				  struct timespec *timestamp);
static int igb_timesync_write_time(struct rte_eth_dev *dev,
				   const struct timespec *timestamp);
static int eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev,
					uint16_t queue_id);
static int eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev,
					 uint16_t queue_id);
static void eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
				       uint8_t queue, uint8_t msix_vector);
static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
			       uint8_t index, uint8_t offset);
static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);
/*
 * Define VF Stats MACRO for Non "cleared on read" register
 */
#define UPDATE_VF_STAT(reg, last, cur)            \
{                                                 \
	u32 latest = E1000_READ_REG(hw, reg);     \
	cur += (latest - last) & UINT_MAX;        \
	last = latest;                            \
}
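
/*
 * For example, a 32-bit VF counter that wraps between two reads is still
 * accumulated correctly: with last = 0xfffffff0 and latest = 0x00000010,
 * (latest - last) & UINT_MAX == 0x20, i.e. the 32 packets counted across
 * the wrap. The running total "cur" is kept in a 64-bit software counter.
 */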
#define IGB_FC_PAUSE_TIME 0x0680
#define IGB_LINK_UPDATE_CHECK_TIMEOUT  90  /* 9s */
#define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */

#define IGBVF_PMD_NAME "rte_igbvf_pmd"     /* PMD name */
static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_igb_map[] = {

#define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

{0},
};

/*
 * The set of PCI devices this driver supports (for 82576&I350 VF)
 */
static const struct rte_pci_id pci_id_igbvf_map[] = {

#define RTE_PCI_DEV_ID_DECL_IGBVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

{0},
};
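
/*
 * Both tables are filled by an X-macro pattern: rte_pci_dev_ids.h contains
 * one RTE_PCI_DEV_ID_DECL_IGB()/RTE_PCI_DEV_ID_DECL_IGBVF() invocation per
 * supported device, so defining the macro just before the include expands
 * each invocation into an rte_pci_id initializer here. An entry such as
 * RTE_PCI_DEV_ID_DECL_IGB(0x8086, E1000_DEV_ID_82576) would expand to
 * {RTE_PCI_DEVICE(0x8086, E1000_DEV_ID_82576)},.
 */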
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = E1000_MAX_RING_DESC,
	.nb_min = E1000_MIN_RING_DESC,
	.nb_align = IGB_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = E1000_MAX_RING_DESC,
	.nb_min = E1000_MIN_RING_DESC,
	.nb_align = IGB_RXD_ALIGN,
};
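
/*
 * These limits are reported to applications through rte_eth_dev_info, and
 * the queue setup paths reject a ring size outside [nb_min, nb_max] or
 * one that is not a multiple of nb_align. So, for instance, a request for
 * 512 descriptors passes, while an unaligned 500 would be refused.
 */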
static const struct eth_dev_ops eth_igb_ops = {
	.dev_configure        = eth_igb_configure,
	.dev_start            = eth_igb_start,
	.dev_stop             = eth_igb_stop,
	.dev_close            = eth_igb_close,
	.promiscuous_enable   = eth_igb_promiscuous_enable,
	.promiscuous_disable  = eth_igb_promiscuous_disable,
	.allmulticast_enable  = eth_igb_allmulticast_enable,
	.allmulticast_disable = eth_igb_allmulticast_disable,
	.link_update          = eth_igb_link_update,
	.stats_get            = eth_igb_stats_get,
	.xstats_get           = eth_igb_xstats_get,
	.stats_reset          = eth_igb_stats_reset,
	.xstats_reset         = eth_igb_xstats_reset,
	.dev_infos_get        = eth_igb_infos_get,
	.mtu_set              = eth_igb_mtu_set,
	.vlan_filter_set      = eth_igb_vlan_filter_set,
	.vlan_tpid_set        = eth_igb_vlan_tpid_set,
	.vlan_offload_set     = eth_igb_vlan_offload_set,
	.rx_queue_setup       = eth_igb_rx_queue_setup,
	.rx_queue_intr_enable = eth_igb_rx_queue_intr_enable,
	.rx_queue_intr_disable = eth_igb_rx_queue_intr_disable,
	.rx_queue_release     = eth_igb_rx_queue_release,
	.rx_queue_count       = eth_igb_rx_queue_count,
	.rx_descriptor_done   = eth_igb_rx_descriptor_done,
	.tx_queue_setup       = eth_igb_tx_queue_setup,
	.tx_queue_release     = eth_igb_tx_queue_release,
	.dev_led_on           = eth_igb_led_on,
	.dev_led_off          = eth_igb_led_off,
	.flow_ctrl_get        = eth_igb_flow_ctrl_get,
	.flow_ctrl_set        = eth_igb_flow_ctrl_set,
	.mac_addr_add         = eth_igb_rar_set,
	.mac_addr_remove      = eth_igb_rar_clear,
	.mac_addr_set         = eth_igb_default_mac_addr_set,
	.reta_update          = eth_igb_rss_reta_update,
	.reta_query           = eth_igb_rss_reta_query,
	.rss_hash_update      = eth_igb_rss_hash_update,
	.rss_hash_conf_get    = eth_igb_rss_hash_conf_get,
	.filter_ctrl          = eth_igb_filter_ctrl,
	.set_mc_addr_list     = eth_igb_set_mc_addr_list,
	.rxq_info_get         = igb_rxq_info_get,
	.txq_info_get         = igb_txq_info_get,
	.timesync_enable      = igb_timesync_enable,
	.timesync_disable     = igb_timesync_disable,
	.timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = igb_timesync_read_tx_timestamp,
	.get_reg_length       = eth_igb_get_reg_length,
	.get_reg              = eth_igb_get_regs,
	.get_eeprom_length    = eth_igb_get_eeprom_length,
	.get_eeprom           = eth_igb_get_eeprom,
	.set_eeprom           = eth_igb_set_eeprom,
	.timesync_adjust_time = igb_timesync_adjust_time,
	.timesync_read_time   = igb_timesync_read_time,
	.timesync_write_time  = igb_timesync_write_time,
};
/*
 * dev_ops for virtual function, bare necessities for basic vf
 * operation have been implemented
 */
static const struct eth_dev_ops igbvf_eth_dev_ops = {
	.dev_configure        = igbvf_dev_configure,
	.dev_start            = igbvf_dev_start,
	.dev_stop             = igbvf_dev_stop,
	.dev_close            = igbvf_dev_close,
	.link_update          = eth_igb_link_update,
	.stats_get            = eth_igbvf_stats_get,
	.xstats_get           = eth_igbvf_xstats_get,
	.stats_reset          = eth_igbvf_stats_reset,
	.xstats_reset         = eth_igbvf_stats_reset,
	.vlan_filter_set      = igbvf_vlan_filter_set,
	.dev_infos_get        = eth_igbvf_infos_get,
	.rx_queue_setup       = eth_igb_rx_queue_setup,
	.rx_queue_release     = eth_igb_rx_queue_release,
	.tx_queue_setup       = eth_igb_tx_queue_setup,
	.tx_queue_release     = eth_igb_tx_queue_release,
	.set_mc_addr_list     = eth_igb_set_mc_addr_list,
	.rxq_info_get         = igb_rxq_info_get,
	.txq_info_get         = igb_txq_info_get,
	.mac_addr_set         = igbvf_default_mac_addr_set,
	.get_reg_length       = igbvf_get_reg_length,
	.get_reg              = igbvf_get_regs,
};
/* store statistics names and their offsets in the stats structure */
struct rte_igb_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};
static const struct rte_igb_xstats_name_off rte_igb_stats_strings[] = {
	{"rx_crc_errors", offsetof(struct e1000_hw_stats, crcerrs)},
	{"rx_align_errors", offsetof(struct e1000_hw_stats, algnerrc)},
	{"rx_symbol_errors", offsetof(struct e1000_hw_stats, symerrs)},
	{"rx_missed_packets", offsetof(struct e1000_hw_stats, mpc)},
	{"tx_single_collision_packets", offsetof(struct e1000_hw_stats, scc)},
	{"tx_multiple_collision_packets", offsetof(struct e1000_hw_stats, mcc)},
	{"tx_excessive_collision_packets", offsetof(struct e1000_hw_stats,
		ecol)},
	{"tx_late_collisions", offsetof(struct e1000_hw_stats, latecol)},
	{"tx_total_collisions", offsetof(struct e1000_hw_stats, colc)},
	{"tx_deferred_packets", offsetof(struct e1000_hw_stats, dc)},
	{"tx_no_carrier_sense_packets", offsetof(struct e1000_hw_stats, tncrs)},
	{"rx_carrier_ext_errors", offsetof(struct e1000_hw_stats, cexterr)},
	{"rx_length_errors", offsetof(struct e1000_hw_stats, rlec)},
	{"rx_xon_packets", offsetof(struct e1000_hw_stats, xonrxc)},
	{"tx_xon_packets", offsetof(struct e1000_hw_stats, xontxc)},
	{"rx_xoff_packets", offsetof(struct e1000_hw_stats, xoffrxc)},
	{"tx_xoff_packets", offsetof(struct e1000_hw_stats, xofftxc)},
	{"rx_flow_control_unsupported_packets", offsetof(struct e1000_hw_stats,
		fcruc)},
	{"rx_size_64_packets", offsetof(struct e1000_hw_stats, prc64)},
	{"rx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, prc127)},
	{"rx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, prc255)},
	{"rx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, prc511)},
	{"rx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats,
		prc1023)},
	{"rx_size_1024_to_max_packets", offsetof(struct e1000_hw_stats,
		prc1522)},
	{"rx_broadcast_packets", offsetof(struct e1000_hw_stats, bprc)},
	{"rx_multicast_packets", offsetof(struct e1000_hw_stats, mprc)},
	{"rx_undersize_errors", offsetof(struct e1000_hw_stats, ruc)},
	{"rx_fragment_errors", offsetof(struct e1000_hw_stats, rfc)},
	{"rx_oversize_errors", offsetof(struct e1000_hw_stats, roc)},
	{"rx_jabber_errors", offsetof(struct e1000_hw_stats, rjc)},
	{"rx_management_packets", offsetof(struct e1000_hw_stats, mgprc)},
	{"rx_management_dropped", offsetof(struct e1000_hw_stats, mgpdc)},
	{"tx_management_packets", offsetof(struct e1000_hw_stats, mgptc)},
	{"rx_total_packets", offsetof(struct e1000_hw_stats, tpr)},
	{"tx_total_packets", offsetof(struct e1000_hw_stats, tpt)},
	{"rx_total_bytes", offsetof(struct e1000_hw_stats, tor)},
	{"tx_total_bytes", offsetof(struct e1000_hw_stats, tot)},
	{"tx_size_64_packets", offsetof(struct e1000_hw_stats, ptc64)},
	{"tx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, ptc127)},
	{"tx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, ptc255)},
	{"tx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, ptc511)},
	{"tx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats,
		ptc1023)},
	{"tx_size_1023_to_max_packets", offsetof(struct e1000_hw_stats,
		ptc1522)},
	{"tx_multicast_packets", offsetof(struct e1000_hw_stats, mptc)},
	{"tx_broadcast_packets", offsetof(struct e1000_hw_stats, bptc)},
	{"tx_tso_packets", offsetof(struct e1000_hw_stats, tsctc)},
	{"tx_tso_errors", offsetof(struct e1000_hw_stats, tsctfc)},
	{"rx_sent_to_host_packets", offsetof(struct e1000_hw_stats, rpthc)},
	{"tx_sent_by_host_packets", offsetof(struct e1000_hw_stats, hgptc)},
	{"rx_code_violation_packets", offsetof(struct e1000_hw_stats, scvpc)},
	{"rx_header_errors", offsetof(struct e1000_hw_stats, hrmpc)},
	{"interrupt_assert_count", offsetof(struct e1000_hw_stats, iac)},
};

#define IGB_NB_XSTATS (sizeof(rte_igb_stats_strings) / \
		sizeof(rte_igb_stats_strings[0]))
static const struct rte_igb_xstats_name_off rte_igbvf_stats_strings[] = {
	{"rx_multicast_packets", offsetof(struct e1000_vf_stats, mprc)},
	{"rx_good_loopback_packets", offsetof(struct e1000_vf_stats, gprlbc)},
	{"tx_good_loopback_packets", offsetof(struct e1000_vf_stats, gptlbc)},
	{"rx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gorlbc)},
	{"tx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gotlbc)},
};

#define IGBVF_NB_XSTATS (sizeof(rte_igbvf_stats_strings) / \
		sizeof(rte_igbvf_stats_strings[0]))
/*
 * Atomically reads the link status information from global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to read from.
 *   - Pointer to the buffer to be saved with the link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_igb_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
/*
 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to write to.
 *   - Pointer to the buffer holding the link status to be written.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
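
/*
 * Both helpers rely on struct rte_eth_link packing into 64 bits, so the
 * whole link record can be copied with a single compare-and-set. The
 * cmpset uses the destination's current value as the "expected" value,
 * so it only fails if another thread raced in between the read of *dst
 * and the atomic operation itself. Typical use:
 *
 *	struct rte_eth_link link;
 *	if (rte_igb_dev_atomic_read_link_status(dev, &link) == 0 &&
 *	    link.link_status)
 *		;	// port is up
 */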
static void
igb_intr_enable(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
	E1000_WRITE_FLUSH(hw);
}
static void
igb_intr_disable(struct e1000_hw *hw)
{
	E1000_WRITE_REG(hw, E1000_IMC, ~0);
	E1000_WRITE_FLUSH(hw);
}
static inline int32_t
igb_pf_reset_hw(struct e1000_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = e1000_reset_hw(hw);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	return status;
}
static void
igb_identify_hardware(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw->vendor_id = dev->pci_dev->id.vendor_id;
	hw->device_id = dev->pci_dev->id.device_id;
	hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;

	e1000_set_mac_type(hw);

	/* need to check if it is a vf device below */
}
static int
igb_reset_swfw_lock(struct e1000_hw *hw)
{
	int ret_val;

	/*
	 * Do mac ops initialization manually here, since we will need
	 * some function pointers set by this call.
	 */
	ret_val = e1000_init_mac_params(hw);
	if (ret_val)
		return ret_val;

	/*
	 * SMBI lock should not fail in this early stage. If this is the case,
	 * it is due to an improper exit of the application.
	 * So force the release of the faulty lock.
	 */
	if (e1000_get_hw_semaphore_generic(hw) < 0) {
		PMD_DRV_LOG(DEBUG, "SMBI lock released");
	}
	e1000_put_hw_semaphore_generic(hw);

	if (hw->mac.ops.acquire_swfw_sync != NULL) {
		uint16_t mask;

		/*
		 * Phy lock should not fail in this early stage. If this is
		 * the case, it is due to an improper exit of the application.
		 * So force the release of the faulty lock.
		 */
		mask = E1000_SWFW_PHY0_SM << hw->bus.func;
		if (hw->bus.func > E1000_FUNC_1)
			mask <<= 2;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
				    hw->bus.func);
		}
		hw->mac.ops.release_swfw_sync(hw, mask);

		/*
		 * This one is more tricky since it is common to all ports; but
		 * swfw_sync retries last long enough (1s) to be almost sure
		 * that if the lock can not be taken it is due to an improper
		 * lock of the semaphore.
		 */
		mask = E1000_SWFW_EEP_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW common locks released");
		}
		hw->mac.ops.release_swfw_sync(hw, mask);
	}

	return E1000_SUCCESS;
}
static int
eth_igb_dev_init(struct rte_eth_dev *eth_dev)
{
	int error = 0;
	struct rte_pci_device *pci_dev;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);

	uint32_t ctrl_ext;

	pci_dev = eth_dev->pci_dev;

	eth_dev->dev_ops = &eth_igb_ops;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	igb_identify_hardware(eth_dev);
	if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	e1000_get_bus_info(hw);

	/* Reset any pending lock */
	if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	/* Finish initialization */
	if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	hw->mac.autoneg = 1;
	hw->phy.autoneg_wait_to_complete = 0;
	hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = 0; /* AUTO_ALL_MODES */
		hw->phy.disable_polarity_correction = 0;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	/*
	 * Start from a known state, this is important in reading the nvm
	 * and mac from that.
	 */
	igb_pf_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time its a real issue.
		 */
		if (e1000_validate_nvm_checksum(hw) < 0) {
			PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
			error = -EIO;
			goto err_late;
		}
	}

	/* Read the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(hw) != 0) {
		PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
		error = -EIO;
		goto err_late;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("e1000",
		ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
			"store MAC addresses",
			ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		error = -ENOMEM;
		goto err_late;
	}

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)hw->mac.addr,
			&eth_dev->data->mac_addrs[0]);

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* Now initialize the hardware */
	if (igb_hardware_init(hw) != 0) {
		PMD_INIT_LOG(ERR, "Hardware initialization failed");
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		error = -ENODEV;
		goto err_late;
	}
	hw->mac.get_link_status = 1;
	adapter->stopped = 0;

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(hw) < 0) {
		PMD_INIT_LOG(ERR, "PHY reset is blocked due to"
			" SOL/IDER session");
	}

	/* initialize PF if max_vfs not zero */
	igb_pf_host_init(eth_dev);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(&pci_dev->intr_handle,
				   eth_igb_interrupt_handler,
				   (void *)eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pci_dev->intr_handle);

	/* enable support intr */
	igb_intr_enable(eth_dev);

	TAILQ_INIT(&filter_info->flex_list);
	filter_info->flex_mask = 0;
	TAILQ_INIT(&filter_info->twotuple_list);
	filter_info->twotuple_mask = 0;
	TAILQ_INIT(&filter_info->fivetuple_list);
	filter_info->fivetuple_mask = 0;

	return 0;

err_late:
	igb_hw_control_release(hw);

	return error;
}
static int
eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct e1000_hw *hw;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	hw = E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	pci_dev = eth_dev->pci_dev;

	if (adapter->stopped == 0)
		eth_igb_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* Reset any pending lock */
	igb_reset_swfw_lock(hw);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	/* uninitialize PF if max_vfs not zero */
	igb_pf_host_uninit(eth_dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(&(pci_dev->intr_handle));
	rte_intr_callback_unregister(&(pci_dev->intr_handle),
		eth_igb_interrupt_handler, (void *)eth_dev);

	return 0;
}
/*
 * Virtual Function device init
 */
static int
eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int diag;
	struct ether_addr *perm_addr = (struct ether_addr *)hw->mac.perm_addr;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &igbvf_eth_dev_ops;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
		return 0;
	}

	pci_dev = eth_dev->pci_dev;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	adapter->stopped = 0;

	/* Initialize the shared code (base driver) */
	diag = e1000_setup_init_funcs(hw, TRUE);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d",
			diag);
		return -EIO;
	}

	/* init_mailbox_params */
	hw->mbx.ops.init_params(hw);

	/* Disable the interrupts for VF */
	igbvf_intr_disable(hw);

	diag = hw->mac.ops.reset_hw(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN *
		hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %d bytes needed to store MAC "
			"addresses",
			ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		return -ENOMEM;
	}

	/* Generate a random MAC address, if none was assigned by PF. */
	if (is_zero_ether_addr(perm_addr)) {
		eth_random_addr(perm_addr->addr_bytes);
		diag = e1000_rar_set(hw, perm_addr->addr_bytes, 0);
		if (diag) {
			rte_free(eth_dev->data->mac_addrs);
			eth_dev->data->mac_addrs = NULL;
			return diag;
		}
		PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
		PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
			     "%02x:%02x:%02x:%02x:%02x:%02x",
			     perm_addr->addr_bytes[0],
			     perm_addr->addr_bytes[1],
			     perm_addr->addr_bytes[2],
			     perm_addr->addr_bytes[3],
			     perm_addr->addr_bytes[4],
			     perm_addr->addr_bytes[5]);
	}

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x "
		     "mac.type=%s",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id, "igb_mac_82576_vf");

	return 0;
}
static int
eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	if (adapter->stopped == 0)
		igbvf_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	return 0;
}
static struct eth_driver rte_igb_pmd = {
	.pci_drv = {
		.name = "rte_igb_pmd",
		.id_table = pci_id_igb_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
			RTE_PCI_DRV_DETACHABLE,
	},
	.eth_dev_init = eth_igb_dev_init,
	.eth_dev_uninit = eth_igb_dev_uninit,
	.dev_private_size = sizeof(struct e1000_adapter),
};
/*
 * virtual function driver struct
 */
static struct eth_driver rte_igbvf_pmd = {
	.pci_drv = {
		.name = "rte_igbvf_pmd",
		.id_table = pci_id_igbvf_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
	},
	.eth_dev_init = eth_igbvf_dev_init,
	.eth_dev_uninit = eth_igbvf_dev_uninit,
	.dev_private_size = sizeof(struct e1000_adapter),
};
static int
rte_igb_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
{
	rte_eth_driver_register(&rte_igb_pmd);
	return 0;
}
static void
igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* RCTL: enable VLAN filter since VMDq always uses the VLAN filter */
	uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);

	rctl |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}
/*
 * VF Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Virtual Poll Mode] Driver of PCI IGB devices.
 */
static int
rte_igbvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	rte_eth_driver_register(&rte_igbvf_pmd);
	return 0;
}
static int
igb_check_mq_mode(struct rte_eth_dev *dev)
{
	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
	uint16_t nb_rx_q = dev->data->nb_rx_queues;
	uint16_t nb_tx_q = dev->data->nb_tx_queues;

	if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) ||
	    tx_mq_mode == ETH_MQ_TX_DCB ||
	    tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) {
		PMD_INIT_LOG(ERR, "DCB mode is not supported.");
		return -EINVAL;
	}
	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		/* Check multi-queue mode.
		 * To not break software, ETH_MQ_RX_NONE is also accepted,
		 * as it might be used to turn off the VLAN filter.
		 */

		if (rx_mq_mode == ETH_MQ_RX_NONE ||
		    rx_mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
			RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
		} else {
			/* Only one queue is supported on VFs.
			 * RSS together with SRIOV is not supported.
			 */
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" wrong mq_mode rx %d.",
					rx_mq_mode);
			return -EINVAL;
		}
		/* TX mode is not used here, so the mode might be ignored. */
		if (tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
			/* SRIOV only works in VMDq enable mode */
			PMD_INIT_LOG(WARNING, "SRIOV is active,"
					" TX mode %d is not supported. "
					" Driver will behave as %d mode.",
					tx_mq_mode, ETH_MQ_TX_VMDQ_ONLY);
		}

		/* check valid queue number */
		if ((nb_rx_q > 1) || (nb_tx_q > 1)) {
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" only one queue on VFs is supported.");
			return -EINVAL;
		}
	} else {
		/* To not break software that sets an invalid mode, only
		 * display a warning if the mode is invalid.
		 */
		if (rx_mq_mode != ETH_MQ_RX_NONE &&
		    rx_mq_mode != ETH_MQ_RX_VMDQ_ONLY &&
		    rx_mq_mode != ETH_MQ_RX_RSS) {
			/* RSS together with VMDq not supported */
			PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
				     rx_mq_mode);
			return -EINVAL;
		}

		if (tx_mq_mode != ETH_MQ_TX_NONE &&
		    tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
			PMD_INIT_LOG(WARNING, "TX mode %d is not supported."
					" Since txmode is meaningless in this"
					" driver, it is just ignored.",
					tx_mq_mode);
		}
	}
	return 0;
}
static int
eth_igb_configure(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* multiple queue mode checking */
	ret = igb_check_mq_mode(dev);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "igb_check_mq_mode fails with %d.",
			    ret);
		return ret;
	}

	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
	PMD_INIT_FUNC_TRACE();

	return 0;
}
static int
eth_igb_start(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);
	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
	int ret, mask;
	uint32_t intr_vector = 0;
	uint32_t ctrl_ext;

	PMD_INIT_FUNC_TRACE();

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* Power up the phy. Needed to make the link go Up */
	e1000_power_up_phy(hw);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 */
	if (hw->mac.type == e1000_82575) {
		uint32_t pba;

		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		E1000_WRITE_REG(hw, E1000_PBA, pba);
	}

	/* Put the address into the Receive Address Array */
	e1000_rar_set(hw, hw->mac.addr, 0);

	/* Initialize the hardware */
	if (igb_hardware_init(hw)) {
		PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
		return -EIO;
	}
	adapter->stopped = 0;

	E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	/* configure PF module if SRIOV enabled */
	igb_pf_host_configure(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec\n", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure MSI-X for Rx interrupt */
	eth_igb_configure_msix_intr(dev);

	/* Configure for OS presence */
	igb_init_manageability(hw);

	eth_igb_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	ret = eth_igb_rx_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		igb_dev_clear_queues(dev);
		return ret;
	}

	e1000_clear_hw_cntrs_base_generic(hw);

	/*
	 * VLAN Offload Settings
	 */
	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
			ETH_VLAN_EXTEND_MASK;
	eth_igb_vlan_offload_set(dev, mask);

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
		/* Enable VLAN filter since VMDq always uses the VLAN filter */
		igb_vmdq_vlan_hw_filter_enable(dev);
	}

	if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
	    (hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210) ||
	    (hw->mac.type == e1000_i211)) {
		/* Configure EITR with the maximum possible value (0xFFFF) */
		E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
	}

	/* Setup link speed and duplex */
	switch (dev->data->dev_conf.link_speed) {
	case ETH_LINK_SPEED_AUTONEG:
		if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
			hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
		else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
			hw->phy.autoneg_advertised = E1000_ALL_HALF_DUPLEX;
		else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
			hw->phy.autoneg_advertised = E1000_ALL_FULL_DUPLEX;
		else
			goto error_invalid_config;
		break;
	case ETH_LINK_SPEED_10:
		if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
			hw->phy.autoneg_advertised = E1000_ALL_10_SPEED;
		else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
			hw->phy.autoneg_advertised = ADVERTISE_10_HALF;
		else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
			hw->phy.autoneg_advertised = ADVERTISE_10_FULL;
		else
			goto error_invalid_config;
		break;
	case ETH_LINK_SPEED_100:
		if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
			hw->phy.autoneg_advertised = E1000_ALL_100_SPEED;
		else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
			hw->phy.autoneg_advertised = ADVERTISE_100_HALF;
		else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
			hw->phy.autoneg_advertised = ADVERTISE_100_FULL;
		else
			goto error_invalid_config;
		break;
	case ETH_LINK_SPEED_1000:
		if ((dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX) ||
		    (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX))
			hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
		else
			goto error_invalid_config;
		break;
	case ETH_LINK_SPEED_10000:
	default:
		goto error_invalid_config;
	}
	e1000_setup_link(hw);

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			eth_igb_lsc_interrupt_setup(dev);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     eth_igb_interrupt_handler,
					     (void *)dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex\n");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		eth_igb_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	igb_intr_enable(dev);

	PMD_INIT_LOG(DEBUG, "<<");

	return 0;

error_invalid_config:
	PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u",
		     dev->data->dev_conf.link_speed,
		     dev->data->dev_conf.link_duplex, dev->data->port_id);
	igb_dev_clear_queues(dev);
	return -EINVAL;
}
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 *
 **********************************************************************/
static void
eth_igb_stop(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct rte_eth_link link;
	struct e1000_flex_filter *p_flex;
	struct e1000_5tuple_filter *p_5tuple, *p_5tuple_next;
	struct e1000_2tuple_filter *p_2tuple, *p_2tuple_next;
	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;

	igb_intr_disable(hw);

	/* disable intr eventfd mapping */
	rte_intr_disable(intr_handle);

	igb_pf_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	/* Set bit for Go Link disconnect */
	if (hw->mac.type >= e1000_82580) {
		uint32_t phpm_reg;

		phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
		phpm_reg |= E1000_82580_PM_GO_LINKD;
		E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
	}

	/* Power down the phy. Needed to make the link go Down */
	if (hw->phy.media_type == e1000_media_type_copper)
		e1000_power_down_phy(hw);
	else
		e1000_shutdown_fiber_serdes_link(hw);

	igb_dev_clear_queues(dev);

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_igb_dev_atomic_write_link_status(dev, &link);

	/* Remove all flex filters of the device */
	while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) {
		TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries);
		rte_free(p_flex);
	}
	filter_info->flex_mask = 0;

	/* Remove all ntuple filters of the device */
	for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
	     p_5tuple != NULL; p_5tuple = p_5tuple_next) {
		p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
		TAILQ_REMOVE(&filter_info->fivetuple_list,
			     p_5tuple, entries);
		rte_free(p_5tuple);
	}
	filter_info->fivetuple_mask = 0;
	for (p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list);
	     p_2tuple != NULL; p_2tuple = p_2tuple_next) {
		p_2tuple_next = TAILQ_NEXT(p_2tuple, entries);
		TAILQ_REMOVE(&filter_info->twotuple_list,
			     p_2tuple, entries);
		rte_free(p_2tuple);
	}
	filter_info->twotuple_mask = 0;

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   eth_igb_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}
static void
eth_igb_close(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);
	struct rte_eth_link link;
	struct rte_pci_device *pci_dev;

	eth_igb_stop(dev);
	adapter->stopped = 1;

	e1000_phy_hw_reset(hw);
	igb_release_manageability(hw);
	igb_hw_control_release(hw);

	/* Clear bit for Go Link disconnect */
	if (hw->mac.type >= e1000_82580) {
		uint32_t phpm_reg;

		phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
		phpm_reg &= ~E1000_82580_PM_GO_LINKD;
		E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
	}

	igb_dev_free_queues(dev);

	pci_dev = dev->pci_dev;
	if (pci_dev->intr_handle.intr_vec) {
		rte_free(pci_dev->intr_handle.intr_vec);
		pci_dev->intr_handle.intr_vec = NULL;
	}

	memset(&link, 0, sizeof(link));
	rte_igb_dev_atomic_write_link_status(dev, &link);
}
static int
igb_get_rx_buffer_size(struct e1000_hw *hw)
{
	uint32_t rx_buf_size;

	if (hw->mac.type == e1000_82576) {
		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10;
	} else if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i350) {
		/* PBS needs to be translated according to a lookup table */
		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf);
		rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size);
		rx_buf_size = (rx_buf_size << 10);
	} else if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10;
	} else {
		rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10;
	}

	return rx_buf_size;
}
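
/*
 * Example decode: if the RXPBS size field reads 64 (the unit is 1 KB),
 * rx_buf_size becomes 64 << 10 = 65536 bytes. The MAC types above only
 * differ in the width and position of that field, and in whether the raw
 * value must first go through the 82580 lookup-table adjustment.
 */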
/*********************************************************************
 *
 *  Initialize the hardware
 *
 **********************************************************************/
static int
igb_hardware_init(struct e1000_hw *hw)
{
	uint32_t rx_buf_size;
	int diag;

	/* Let the firmware know the OS is in control */
	igb_hw_control_acquire(hw);

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two standard size (1518)
	 *   frames to be received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	rx_buf_size = igb_get_rx_buffer_size(hw);

	hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2);
	hw->fc.low_water = hw->fc.high_water - 1500;
	hw->fc.pause_time = IGB_FC_PAUSE_TIME;
	hw->fc.send_xon = 1;
	/* Set Flow control, use the tunable location if sane */
	if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4))
		hw->fc.requested_mode = igb_fc_setting;
	else
		hw->fc.requested_mode = e1000_fc_none;

	/* Issue a global reset */
	igb_pf_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	diag = e1000_init_hw(hw);
	if (diag < 0)
		return diag;

	E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
	e1000_get_phy_info(hw);
	e1000_check_for_link(hw);

	return 0;
}
/* This function is based on igb_update_stats_counters() in igb/if_igb.c */
static void
igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats)
{
	int pause_frames;

	uint64_t old_gprc  = stats->gprc;
	uint64_t old_gptc  = stats->gptc;
	uint64_t old_tpr   = stats->tpr;
	uint64_t old_tpt   = stats->tpt;
	uint64_t old_rpthc = stats->rpthc;
	uint64_t old_hgptc = stats->hgptc;

	if (hw->phy.media_type == e1000_media_type_copper ||
	    (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		stats->symerrs +=
		    E1000_READ_REG(hw, E1000_SYMERRS);
		stats->sec += E1000_READ_REG(hw, E1000_SEC);
	}

	stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
	stats->mpc += E1000_READ_REG(hw, E1000_MPC);
	stats->scc += E1000_READ_REG(hw, E1000_SCC);
	stats->ecol += E1000_READ_REG(hw, E1000_ECOL);

	stats->mcc += E1000_READ_REG(hw, E1000_MCC);
	stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
	stats->colc += E1000_READ_REG(hw, E1000_COLC);
	stats->dc += E1000_READ_REG(hw, E1000_DC);
	stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
	stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
	stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
	/*
	 * For watchdog management we need to know if we have been
	 * paused during the last interval, so capture that here.
	 */
	pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
	stats->xoffrxc += pause_frames;
	stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
	stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
	stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
	stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
	stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
	stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
	stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
	stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
	stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
	stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
	stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
	stats->gptc += E1000_READ_REG(hw, E1000_GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	/* Workaround CRC bytes included in size, take away 4 bytes/packet */
	stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
	stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
	stats->gorc -= (stats->gprc - old_gprc) * ETHER_CRC_LEN;
	stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
	stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);
	stats->gotc -= (stats->gptc - old_gptc) * ETHER_CRC_LEN;
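
	/*
	 * Example of the CRC workaround: if 10 good packets were received
	 * since the last read, GORC grew by the on-wire byte count including
	 * each packet's 4-byte CRC, so 10 * ETHER_CRC_LEN = 40 bytes are
	 * subtracted to report the byte totals without CRC.
	 */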
	stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
	stats->ruc += E1000_READ_REG(hw, E1000_RUC);
	stats->rfc += E1000_READ_REG(hw, E1000_RFC);
	stats->roc += E1000_READ_REG(hw, E1000_ROC);
	stats->rjc += E1000_READ_REG(hw, E1000_RJC);

	stats->tpr += E1000_READ_REG(hw, E1000_TPR);
	stats->tpt += E1000_READ_REG(hw, E1000_TPT);

	stats->tor += E1000_READ_REG(hw, E1000_TORL);
	stats->tor += ((uint64_t)E1000_READ_REG(hw, E1000_TORH) << 32);
	stats->tor -= (stats->tpr - old_tpr) * ETHER_CRC_LEN;
	stats->tot += E1000_READ_REG(hw, E1000_TOTL);
	stats->tot += ((uint64_t)E1000_READ_REG(hw, E1000_TOTH) << 32);
	stats->tot -= (stats->tpt - old_tpt) * ETHER_CRC_LEN;

	stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
	stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
	stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
	stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
	stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
	stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
	stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
	stats->bptc += E1000_READ_REG(hw, E1000_BPTC);

	/* Interrupt Counts */

	stats->iac += E1000_READ_REG(hw, E1000_IAC);
	stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
	stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
	stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
	stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
	stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
	stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
	stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
	stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);

	/* Host to Card Statistics */

	stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
	stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
	stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
	stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
	stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
	stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
	stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
	stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL);
	stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32);
	stats->hgorc -= (stats->rpthc - old_rpthc) * ETHER_CRC_LEN;
	stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL);
	stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32);
	stats->hgotc -= (stats->hgptc - old_hgptc) * ETHER_CRC_LEN;
	stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
	stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
	stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);

	stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
	stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
	stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
	stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
	stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
	stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
}
static void
eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_hw_stats *stats =
		E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	igb_read_stats_registers(hw, stats);

	if (rte_stats == NULL)
		return;

	/* Rx Errors */
	rte_stats->imissed = stats->mpc;
	rte_stats->ierrors = stats->crcerrs +
			     stats->rlec + stats->ruc + stats->roc +
			     rte_stats->imissed +
			     stats->rxerrc + stats->algnerrc + stats->cexterr;

	/* Tx Errors */
	rte_stats->oerrors = stats->ecol + stats->latecol;

	rte_stats->ipackets = stats->gprc;
	rte_stats->opackets = stats->gptc;
	rte_stats->ibytes   = stats->gorc;
	rte_stats->obytes   = stats->gotc;
}
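
/*
 * Applications read these aggregated counters through the generic ethdev
 * API, which dispatches to the .stats_get op above, e.g.:
 *
 *	struct rte_eth_stats st;
 *	if (rte_eth_stats_get(port_id, &st) == 0)
 *		printf("rx %"PRIu64" pkts, %"PRIu64" errors\n",
 *		       st.ipackets, st.ierrors);
 */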
static void
eth_igb_stats_reset(struct rte_eth_dev *dev)
{
	struct e1000_hw_stats *hw_stats =
		E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* HW registers are cleared on read */
	eth_igb_stats_get(dev, NULL);

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));
}
static void
eth_igb_xstats_reset(struct rte_eth_dev *dev)
{
	struct e1000_hw_stats *stats =
		E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* HW registers are cleared on read */
	eth_igb_xstats_get(dev, NULL, IGB_NB_XSTATS);

	/* Reset software totals */
	memset(stats, 0, sizeof(*stats));
}
static int
eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
		   unsigned n)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_hw_stats *hw_stats =
			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	unsigned i;

	if (n < IGB_NB_XSTATS)
		return IGB_NB_XSTATS;

	igb_read_stats_registers(hw, hw_stats);

	/* If this is a reset xstats is NULL, and we have cleared the
	 * registers by reading them.
	 */
	if (!xstats)
		return 0;

	/* Extended stats */
	for (i = 0; i < IGB_NB_XSTATS; i++) {
		snprintf(xstats[i].name, sizeof(xstats[i].name),
			 "%s", rte_igb_stats_strings[i].name);
		xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
			rte_igb_stats_strings[i].offset);
	}

	return IGB_NB_XSTATS;
}
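
/*
 * Callers are expected to size their array first: a call with n smaller
 * than IGB_NB_XSTATS fills nothing and just reports the required count,
 * so the usual pattern is roughly:
 *
 *	int nb = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstats *xs = malloc(nb * sizeof(*xs));
 *	rte_eth_xstats_get(port_id, xs, nb);
 *
 * (the ethdev layer prepends its own generic stats to the driver's.)
 */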
static void
igbvf_read_stats_registers(struct e1000_hw *hw, struct e1000_vf_stats *hw_stats)
{
	/* Good Rx packets, include VF loopback */
	UPDATE_VF_STAT(E1000_VFGPRC,
	    hw_stats->last_gprc, hw_stats->gprc);

	/* Good Rx octets, include VF loopback */
	UPDATE_VF_STAT(E1000_VFGORC,
	    hw_stats->last_gorc, hw_stats->gorc);

	/* Good Tx packets, include VF loopback */
	UPDATE_VF_STAT(E1000_VFGPTC,
	    hw_stats->last_gptc, hw_stats->gptc);

	/* Good Tx octets, include VF loopback */
	UPDATE_VF_STAT(E1000_VFGOTC,
	    hw_stats->last_gotc, hw_stats->gotc);

	/* Rx Multicast packets */
	UPDATE_VF_STAT(E1000_VFMPRC,
	    hw_stats->last_mprc, hw_stats->mprc);

	/* Good Rx loopback packets */
	UPDATE_VF_STAT(E1000_VFGPRLBC,
	    hw_stats->last_gprlbc, hw_stats->gprlbc);

	/* Good Rx loopback octets */
	UPDATE_VF_STAT(E1000_VFGORLBC,
	    hw_stats->last_gorlbc, hw_stats->gorlbc);

	/* Good Tx loopback packets */
	UPDATE_VF_STAT(E1000_VFGPTLBC,
	    hw_stats->last_gptlbc, hw_stats->gptlbc);

	/* Good Tx loopback octets */
	UPDATE_VF_STAT(E1000_VFGOTLBC,
	    hw_stats->last_gotlbc, hw_stats->gotlbc);
}
static int
eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
		     unsigned n)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	unsigned i;

	if (n < IGBVF_NB_XSTATS)
		return IGBVF_NB_XSTATS;

	igbvf_read_stats_registers(hw, hw_stats);

	if (!xstats)
		return 0;

	for (i = 0; i < IGBVF_NB_XSTATS; i++) {
		snprintf(xstats[i].name, sizeof(xstats[i].name), "%s",
			 rte_igbvf_stats_strings[i].name);
		xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
			rte_igbvf_stats_strings[i].offset);
	}

	return IGBVF_NB_XSTATS;
}
1782 eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
1784 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1785 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
1786 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1788 igbvf_read_stats_registers(hw, hw_stats);
1790 if (rte_stats == NULL)
1793 rte_stats->ipackets = hw_stats->gprc;
1794 rte_stats->ibytes = hw_stats->gorc;
1795 rte_stats->opackets = hw_stats->gptc;
1796 rte_stats->obytes = hw_stats->gotc;
1797 rte_stats->imcasts = hw_stats->mprc;
1798 rte_stats->ilbpackets = hw_stats->gprlbc;
1799 rte_stats->ilbbytes = hw_stats->gorlbc;
1800 rte_stats->olbpackets = hw_stats->gptlbc;
1801 rte_stats->olbbytes = hw_stats->gotlbc;
1805 eth_igbvf_stats_reset(struct rte_eth_dev *dev)
1807 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*)
1808 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1810 /* Sync HW registers to the last stats */
1811 eth_igbvf_stats_get(dev, NULL);
1813 /* Reset HW current stats */
1814 memset(&hw_stats->gprc, 0, sizeof(*hw_stats) -
1815 offsetof(struct e1000_vf_stats, gprc));
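/*
 * Only the running counters from "gprc" onward are zeroed here; the
 * base/last snapshot fields that precede gprc in struct e1000_vf_stats
 * survive the memset. The eth_igbvf_stats_get() call above has just synced
 * those snapshots to the current hardware values, so UPDATE_VF_STAT()
 * keeps accumulating correct deltas after the reset.
 */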
1819 eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1821 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1823 dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
1824 dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
1825 dev_info->max_mac_addrs = hw->mac.rar_entry_count;
1826 dev_info->rx_offload_capa =
1827 DEV_RX_OFFLOAD_VLAN_STRIP |
1828 DEV_RX_OFFLOAD_IPV4_CKSUM |
1829 DEV_RX_OFFLOAD_UDP_CKSUM |
1830 DEV_RX_OFFLOAD_TCP_CKSUM;
1831 dev_info->tx_offload_capa =
1832 DEV_TX_OFFLOAD_VLAN_INSERT |
1833 DEV_TX_OFFLOAD_IPV4_CKSUM |
1834 DEV_TX_OFFLOAD_UDP_CKSUM |
1835 DEV_TX_OFFLOAD_TCP_CKSUM |
1836 DEV_TX_OFFLOAD_SCTP_CKSUM |
1837 DEV_TX_OFFLOAD_TCP_TSO;
1839 switch (hw->mac.type) {
1841 dev_info->max_rx_queues = 4;
1842 dev_info->max_tx_queues = 4;
1843 dev_info->max_vmdq_pools = 0;
1847 dev_info->max_rx_queues = 16;
1848 dev_info->max_tx_queues = 16;
1849 dev_info->max_vmdq_pools = ETH_8_POOLS;
1850 dev_info->vmdq_queue_num = 16;
1854 dev_info->max_rx_queues = 8;
1855 dev_info->max_tx_queues = 8;
1856 dev_info->max_vmdq_pools = ETH_8_POOLS;
1857 dev_info->vmdq_queue_num = 8;
1861 dev_info->max_rx_queues = 8;
1862 dev_info->max_tx_queues = 8;
1863 dev_info->max_vmdq_pools = ETH_8_POOLS;
1864 dev_info->vmdq_queue_num = 8;
1868 dev_info->max_rx_queues = 8;
1869 dev_info->max_tx_queues = 8;
1873 dev_info->max_rx_queues = 4;
1874 dev_info->max_tx_queues = 4;
1875 dev_info->max_vmdq_pools = 0;
1879 dev_info->max_rx_queues = 2;
1880 dev_info->max_tx_queues = 2;
1881 dev_info->max_vmdq_pools = 0;
1885 /* Should not happen */
1888 dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t);
1889 dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
1890 dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL;
1892 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1894 .pthresh = IGB_DEFAULT_RX_PTHRESH,
1895 .hthresh = IGB_DEFAULT_RX_HTHRESH,
1896 .wthresh = IGB_DEFAULT_RX_WTHRESH,
1898 .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
1902 dev_info->default_txconf = (struct rte_eth_txconf) {
1904 .pthresh = IGB_DEFAULT_TX_PTHRESH,
1905 .hthresh = IGB_DEFAULT_TX_HTHRESH,
1906 .wthresh = IGB_DEFAULT_TX_WTHRESH,
1911 dev_info->rx_desc_lim = rx_desc_lim;
1912 dev_info->tx_desc_lim = tx_desc_lim;
1916 eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1918 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1920 dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
1921 dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
1922 dev_info->max_mac_addrs = hw->mac.rar_entry_count;
1923 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
1924 DEV_RX_OFFLOAD_IPV4_CKSUM |
1925 DEV_RX_OFFLOAD_UDP_CKSUM |
1926 DEV_RX_OFFLOAD_TCP_CKSUM;
1927 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
1928 DEV_TX_OFFLOAD_IPV4_CKSUM |
1929 DEV_TX_OFFLOAD_UDP_CKSUM |
1930 DEV_TX_OFFLOAD_TCP_CKSUM |
1931 DEV_TX_OFFLOAD_SCTP_CKSUM |
1932 DEV_TX_OFFLOAD_TCP_TSO;
1933 switch (hw->mac.type) {
1935 dev_info->max_rx_queues = 2;
1936 dev_info->max_tx_queues = 2;
1938 case e1000_vfadapt_i350:
1939 dev_info->max_rx_queues = 1;
1940 dev_info->max_tx_queues = 1;
1943 /* Should not happen */
1947 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1949 .pthresh = IGB_DEFAULT_RX_PTHRESH,
1950 .hthresh = IGB_DEFAULT_RX_HTHRESH,
1951 .wthresh = IGB_DEFAULT_RX_WTHRESH,
1953 .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
1957 dev_info->default_txconf = (struct rte_eth_txconf) {
1959 .pthresh = IGB_DEFAULT_TX_PTHRESH,
1960 .hthresh = IGB_DEFAULT_TX_HTHRESH,
1961 .wthresh = IGB_DEFAULT_TX_WTHRESH,
1966 dev_info->rx_desc_lim = rx_desc_lim;
1967 dev_info->tx_desc_lim = tx_desc_lim;
1970 /* Return 0 if the link status changed, -1 if it did not change. */
1972 eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1974 struct e1000_hw *hw =
1975 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1976 struct rte_eth_link link, old;
1977 int link_check, count;
1980 hw->mac.get_link_status = 1;
1982 /* possible wait-to-complete in up to 9 seconds */
1983 for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count ++) {
1984 /* Read the real link status */
1985 switch (hw->phy.media_type) {
1986 case e1000_media_type_copper:
1987 /* Do the work to read phy */
1988 e1000_check_for_link(hw);
1989 link_check = !hw->mac.get_link_status;
1992 case e1000_media_type_fiber:
1993 e1000_check_for_link(hw);
1994 link_check = (E1000_READ_REG(hw, E1000_STATUS) &
1998 case e1000_media_type_internal_serdes:
1999 e1000_check_for_link(hw);
2000 link_check = hw->mac.serdes_has_link;
2003 /* VF device is type_unknown */
2004 case e1000_media_type_unknown:
2005 eth_igbvf_link_update(hw);
2006 link_check = !hw->mac.get_link_status;
2012 if (link_check || wait_to_complete == 0)
2014 rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL);
2016 memset(&link, 0, sizeof(link));
2017 rte_igb_dev_atomic_read_link_status(dev, &link);
2020 /* Now we check if a transition has happened */
2022 hw->mac.ops.get_link_up_info(hw, &link.link_speed,
2024 link.link_status = 1;
2025 } else if (!link_check) {
2026 link.link_speed = 0;
2027 link.link_duplex = 0;
2028 link.link_status = 0;
2030 rte_igb_dev_atomic_write_link_status(dev, &link);
2033 if (old.link_status == link.link_status)
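/*
 * Usage sketch (illustrative only, not part of the driver): the
 * wait_to_complete argument above is driven by the two generic link
 * queries. The helper name and port id are assumptions of the example.
 */
static __rte_unused void
igb_link_usage_example(uint8_t port_id)
{
	struct rte_eth_link link;

	/* May poll the PHY for up to ~9 seconds (wait_to_complete == 1). */
	rte_eth_link_get(port_id, &link);

	/* Single status read, returns immediately (wait_to_complete == 0). */
	rte_eth_link_get_nowait(port_id, &link);

	if (link.link_status)
		PMD_DRV_LOG(INFO, "port %u: %u Mbps", port_id,
			    (unsigned)link.link_speed);
}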
2041 * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
2042 * For ASF and Pass Through versions of f/w this means
2043 * that the driver is loaded.
2046 igb_hw_control_acquire(struct e1000_hw *hw)
2050 /* Let firmware know the driver has taken over */
2051 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2052 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
2056 * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
2057 * For ASF and Pass Through versions of f/w this means that the
2058 * driver is no longer loaded.
2061 igb_hw_control_release(struct e1000_hw *hw)
2065 /* Let firmware take over control of the h/w */
2066 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2067 E1000_WRITE_REG(hw, E1000_CTRL_EXT,
2068 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
2072 * Bit of a misnomer, what this really means is
2073 * to enable OS management of the system... aka
2074 * to disable special hardware management features.
2077 igb_init_manageability(struct e1000_hw *hw)
2079 if (e1000_enable_mng_pass_thru(hw)) {
2080 uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
2081 uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
2083 /* disable hardware interception of ARP */
2084 manc &= ~(E1000_MANC_ARP_EN);
2086 /* enable receiving management packets to the host */
2087 manc |= E1000_MANC_EN_MNG2HOST;
2088 manc2h |= 1 << 5; /* Mng Port 623 */
2089 manc2h |= 1 << 6; /* Mng Port 664 */
2090 E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
2091 E1000_WRITE_REG(hw, E1000_MANC, manc);
2096 igb_release_manageability(struct e1000_hw *hw)
2098 if (e1000_enable_mng_pass_thru(hw)) {
2099 uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
2101 manc |= E1000_MANC_ARP_EN;
2102 manc &= ~E1000_MANC_EN_MNG2HOST;
2104 E1000_WRITE_REG(hw, E1000_MANC, manc);
2109 eth_igb_promiscuous_enable(struct rte_eth_dev *dev)
2111 struct e1000_hw *hw =
2112 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2115 rctl = E1000_READ_REG(hw, E1000_RCTL);
2116 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2117 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2121 eth_igb_promiscuous_disable(struct rte_eth_dev *dev)
2123 struct e1000_hw *hw =
2124 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2127 rctl = E1000_READ_REG(hw, E1000_RCTL);
2128 rctl &= (~E1000_RCTL_UPE);
2129 if (dev->data->all_multicast == 1)
2130 rctl |= E1000_RCTL_MPE;
2132 rctl &= (~E1000_RCTL_MPE);
2133 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2137 eth_igb_allmulticast_enable(struct rte_eth_dev *dev)
2139 struct e1000_hw *hw =
2140 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2143 rctl = E1000_READ_REG(hw, E1000_RCTL);
2144 rctl |= E1000_RCTL_MPE;
2145 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2149 eth_igb_allmulticast_disable(struct rte_eth_dev *dev)
2151 struct e1000_hw *hw =
2152 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2155 if (dev->data->promiscuous == 1)
2156 return; /* must remain in all_multicast mode */
2157 rctl = E1000_READ_REG(hw, E1000_RCTL);
2158 rctl &= (~E1000_RCTL_MPE);
2159 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
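/*
 * Usage sketch (illustrative only, not part of the driver): the RCTL
 * updates above are reached through the generic calls below. Note the
 * ordering constraint enforced in eth_igb_allmulticast_disable():
 * all-multicast cannot be cleared while the port is promiscuous. The
 * helper name and port id are assumptions of the example.
 */
static __rte_unused void
igb_promisc_usage_example(uint8_t port_id)
{
	rte_eth_promiscuous_enable(port_id);
	rte_eth_allmulticast_disable(port_id); /* refused while promiscuous */
	rte_eth_promiscuous_disable(port_id);
	rte_eth_allmulticast_disable(port_id); /* now takes effect */
}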
2163 eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2165 struct e1000_hw *hw =
2166 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2167 struct e1000_vfta * shadow_vfta =
2168 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2173 vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
2174 E1000_VFTA_ENTRY_MASK);
2175 vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
2176 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
2181 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);
2183 /* update local VFTA copy */
2184 shadow_vfta->vfta[vid_idx] = vfta;
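/*
 * Worked example: vlan_id 1027 (0x403) yields vid_idx = (1027 >> 5) & 0x7F
 * = 32 and vid_bit = 1 << (1027 & 0x1F) = 1 << 3, so bit 3 of VFTA[32]
 * controls that VLAN.
 */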
2190 eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
2191 enum rte_vlan_type vlan_type,
2194 struct e1000_hw *hw =
2195 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2196 uint32_t reg = ETHER_TYPE_VLAN;
2199 switch (vlan_type) {
2200 case ETH_VLAN_TYPE_INNER:
2201 reg |= (tpid << 16);
2202 E1000_WRITE_REG(hw, E1000_VET, reg);
2206 PMD_DRV_LOG(ERR, "Unsupported vlan type %d\n", vlan_type);
2214 igb_vlan_hw_filter_disable(struct rte_eth_dev *dev)
2216 struct e1000_hw *hw =
2217 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2220 /* Filter Table Disable */
2221 reg = E1000_READ_REG(hw, E1000_RCTL);
2222 reg &= ~E1000_RCTL_CFIEN;
2223 reg &= ~E1000_RCTL_VFE;
2224 E1000_WRITE_REG(hw, E1000_RCTL, reg);
2228 igb_vlan_hw_filter_enable(struct rte_eth_dev *dev)
2230 struct e1000_hw *hw =
2231 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2232 struct e1000_vfta * shadow_vfta =
2233 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2237 /* Filter Table Enable, CFI not used for packet acceptance */
2238 reg = E1000_READ_REG(hw, E1000_RCTL);
2239 reg &= ~E1000_RCTL_CFIEN;
2240 reg |= E1000_RCTL_VFE;
2241 E1000_WRITE_REG(hw, E1000_RCTL, reg);
2243 /* restore VFTA table */
2244 for (i = 0; i < IGB_VFTA_SIZE; i++)
2245 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
2249 igb_vlan_hw_strip_disable(struct rte_eth_dev *dev)
2251 struct e1000_hw *hw =
2252 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2255 /* VLAN Mode Disable */
2256 reg = E1000_READ_REG(hw, E1000_CTRL);
2257 reg &= ~E1000_CTRL_VME;
2258 E1000_WRITE_REG(hw, E1000_CTRL, reg);
2262 igb_vlan_hw_strip_enable(struct rte_eth_dev *dev)
2264 struct e1000_hw *hw =
2265 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2268 /* VLAN Mode Enable */
2269 reg = E1000_READ_REG(hw, E1000_CTRL);
2270 reg |= E1000_CTRL_VME;
2271 E1000_WRITE_REG(hw, E1000_CTRL, reg);
2275 igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
2277 struct e1000_hw *hw =
2278 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2281 /* CTRL_EXT: Extended VLAN */
2282 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
2283 reg &= ~E1000_CTRL_EXT_EXTEND_VLAN;
2284 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
2286 /* Update maximum packet length */
2287 if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
2288 E1000_WRITE_REG(hw, E1000_RLPML,
2289 dev->data->dev_conf.rxmode.max_rx_pkt_len +
2294 igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
2296 struct e1000_hw *hw =
2297 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2300 /* CTRL_EXT: Extended VLAN */
2301 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
2302 reg |= E1000_CTRL_EXT_EXTEND_VLAN;
2303 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
2305 /* Update maximum packet length */
2306 if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
2307 E1000_WRITE_REG(hw, E1000_RLPML,
2308 dev->data->dev_conf.rxmode.max_rx_pkt_len +
2313 eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2315 if(mask & ETH_VLAN_STRIP_MASK){
2316 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
2317 igb_vlan_hw_strip_enable(dev);
2319 igb_vlan_hw_strip_disable(dev);
2322 if(mask & ETH_VLAN_FILTER_MASK){
2323 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
2324 igb_vlan_hw_filter_enable(dev);
2326 igb_vlan_hw_filter_disable(dev);
2329 if(mask & ETH_VLAN_EXTEND_MASK){
2330 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
2331 igb_vlan_hw_extend_enable(dev);
2333 igb_vlan_hw_extend_disable(dev);
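/*
 * Usage sketch (illustrative only, not part of the driver): the three
 * offload toggles above are normally reached through the generic API.
 * The helper name, port id and VLAN 100 are assumptions of the example.
 */
static __rte_unused void
igb_vlan_offload_usage_example(uint8_t port_id)
{
	/* Enable stripping and filtering, leave extend (QinQ) disabled. */
	(void)rte_eth_dev_set_vlan_offload(port_id,
		ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD);

	/* Accept VLAN 100 through the hardware filter just enabled. */
	(void)rte_eth_dev_vlan_filter(port_id, 100, 1);
}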
2339 * It enables the interrupt mask and then enables the interrupt.
2342 * Pointer to struct rte_eth_dev.
2345 * - On success, zero.
2346 * - On failure, a negative value.
2349 eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
2351 struct e1000_interrupt *intr =
2352 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2354 intr->mask |= E1000_ICR_LSC;
2359 /* It clears the interrupt causes and enables the interrupt.
2360 * It will be called only once during NIC initialization.
2363 * Pointer to struct rte_eth_dev.
2366 * - On success, zero.
2367 * - On failure, a negative value.
2369 static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev)
2371 uint32_t mask, regval;
2372 struct e1000_hw *hw =
2373 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2374 struct rte_eth_dev_info dev_info;
2376 memset(&dev_info, 0, sizeof(dev_info));
2377 eth_igb_infos_get(dev, &dev_info);
2379 mask = 0xFFFFFFFF >> (32 - dev_info.max_rx_queues);
2380 regval = E1000_READ_REG(hw, E1000_EIMS);
2381 E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
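/*
 * Worked example: with max_rx_queues == 8 the mask expression evaluates to
 * 0xFFFFFFFF >> 24 == 0xFF, enabling EIMS bits 0-7, one per Rx queue
 * interrupt vector.
 */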
2387 * It reads ICR to get the interrupt causes, checks them, and sets a bit
2388 * flag to update the link status.
2391 * Pointer to struct rte_eth_dev.
2394 * - On success, zero.
2395 * - On failure, a negative value.
2398 eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
2401 struct e1000_hw *hw =
2402 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2403 struct e1000_interrupt *intr =
2404 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2406 igb_intr_disable(hw);
2408 /* read-on-clear nic registers here */
2409 icr = E1000_READ_REG(hw, E1000_ICR);
2412 if (icr & E1000_ICR_LSC) {
2413 intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
2416 if (icr & E1000_ICR_VMMB)
2417 intr->flags |= E1000_FLAG_MAILBOX;
2423 * It executes link_update after learning that an interrupt is present.
2426 * Pointer to struct rte_eth_dev.
2429 * - On success, zero.
2430 * - On failure, a negative value.
2433 eth_igb_interrupt_action(struct rte_eth_dev *dev)
2435 struct e1000_hw *hw =
2436 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2437 struct e1000_interrupt *intr =
2438 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2439 uint32_t tctl, rctl;
2440 struct rte_eth_link link;
2443 if (intr->flags & E1000_FLAG_MAILBOX) {
2444 igb_pf_mbx_process(dev);
2445 intr->flags &= ~E1000_FLAG_MAILBOX;
2448 igb_intr_enable(dev);
2449 rte_intr_enable(&(dev->pci_dev->intr_handle));
2451 if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
2452 intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
2454 /* set get_link_status to check register later */
2455 hw->mac.get_link_status = 1;
2456 ret = eth_igb_link_update(dev, 0);
2458 /* check if link has changed */
2462 memset(&link, 0, sizeof(link));
2463 rte_igb_dev_atomic_read_link_status(dev, &link);
2464 if (link.link_status) {
2466 " Port %d: Link Up - speed %u Mbps - %s",
2468 (unsigned)link.link_speed,
2469 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
2470 "full-duplex" : "half-duplex");
2472 PMD_INIT_LOG(INFO, " Port %d: Link Down",
2473 dev->data->port_id);
2476 PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
2477 dev->pci_dev->addr.domain,
2478 dev->pci_dev->addr.bus,
2479 dev->pci_dev->addr.devid,
2480 dev->pci_dev->addr.function);
2481 tctl = E1000_READ_REG(hw, E1000_TCTL);
2482 rctl = E1000_READ_REG(hw, E1000_RCTL);
2483 if (link.link_status) {
2485 tctl |= E1000_TCTL_EN;
2486 rctl |= E1000_RCTL_EN;
2489 tctl &= ~E1000_TCTL_EN;
2490 rctl &= ~E1000_RCTL_EN;
2492 E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2493 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2494 E1000_WRITE_FLUSH(hw);
2495 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
2502 * Interrupt handler, which should be registered first.
2505 * Pointer to interrupt handle.
2507 * The address of the parameter (struct rte_eth_dev *) registered before.
2513 eth_igb_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
2516 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2518 eth_igb_interrupt_get_status(dev);
2519 eth_igb_interrupt_action(dev);
2523 eth_igb_led_on(struct rte_eth_dev *dev)
2525 struct e1000_hw *hw;
2527 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2528 return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
2532 eth_igb_led_off(struct rte_eth_dev *dev)
2534 struct e1000_hw *hw;
2536 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2537 return e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
2541 eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2543 struct e1000_hw *hw;
2548 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2549 fc_conf->pause_time = hw->fc.pause_time;
2550 fc_conf->high_water = hw->fc.high_water;
2551 fc_conf->low_water = hw->fc.low_water;
2552 fc_conf->send_xon = hw->fc.send_xon;
2553 fc_conf->autoneg = hw->mac.autoneg;
2556 * Return rx_pause and tx_pause status according to actual setting of
2557 * the TFCE and RFCE bits in the CTRL register.
2559 ctrl = E1000_READ_REG(hw, E1000_CTRL);
2560 if (ctrl & E1000_CTRL_TFCE)
2565 if (ctrl & E1000_CTRL_RFCE)
2570 if (rx_pause && tx_pause)
2571 fc_conf->mode = RTE_FC_FULL;
2573 fc_conf->mode = RTE_FC_RX_PAUSE;
2575 fc_conf->mode = RTE_FC_TX_PAUSE;
2577 fc_conf->mode = RTE_FC_NONE;
2583 eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2585 struct e1000_hw *hw;
2587 enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
2593 uint32_t rx_buf_size;
2594 uint32_t max_high_water;
2597 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2598 if (fc_conf->autoneg != hw->mac.autoneg)
2600 rx_buf_size = igb_get_rx_buffer_size(hw);
2601 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2603 /* Reserve at least one Ethernet frame for the watermark */
2604 max_high_water = rx_buf_size - ETHER_MAX_LEN;
2605 if ((fc_conf->high_water > max_high_water) ||
2606 (fc_conf->high_water < fc_conf->low_water)) {
2607 PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
2608 PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water);
2612 hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
2613 hw->fc.pause_time = fc_conf->pause_time;
2614 hw->fc.high_water = fc_conf->high_water;
2615 hw->fc.low_water = fc_conf->low_water;
2616 hw->fc.send_xon = fc_conf->send_xon;
2618 err = e1000_setup_link_generic(hw);
2619 if (err == E1000_SUCCESS) {
2621 /* check if we want to forward MAC frames - driver doesn't have native
2622 * capability to do that, so we'll write the registers ourselves */
2624 rctl = E1000_READ_REG(hw, E1000_RCTL);
2626 /* set or clear the RCTL.PMCF bit depending on configuration */
2627 if (fc_conf->mac_ctrl_frame_fwd != 0)
2628 rctl |= E1000_RCTL_PMCF;
2630 rctl &= ~E1000_RCTL_PMCF;
2632 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2633 E1000_WRITE_FLUSH(hw);
2638 PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
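/*
 * Usage sketch (illustrative only, not part of the driver): watermarks are
 * byte offsets into the Rx packet buffer, and high_water must leave room
 * for at least one full frame (the max_high_water check above). The helper
 * name and the numeric values are assumptions of the example.
 */
static __rte_unused void
igb_fc_usage_example(uint8_t port_id)
{
	struct rte_eth_fc_conf fc;

	memset(&fc, 0, sizeof(fc));
	fc.mode = RTE_FC_FULL;
	fc.high_water = 0x5000;	/* 20 KB into the Rx buffer */
	fc.low_water = 0x3000;	/* must not exceed high_water */
	fc.pause_time = 0x680;
	fc.send_xon = 1;
	fc.autoneg = 1;		/* must match the port's autoneg setting */
	(void)rte_eth_dev_flow_ctrl_set(port_id, &fc);
}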
2642 #define E1000_RAH_POOLSEL_SHIFT (18)
2644 eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
2645 uint32_t index, __rte_unused uint32_t pool)
2647 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2650 e1000_rar_set(hw, mac_addr->addr_bytes, index);
2651 rah = E1000_READ_REG(hw, E1000_RAH(index));
2652 rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool));
2653 E1000_WRITE_REG(hw, E1000_RAH(index), rah);
2657 eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
2659 uint8_t addr[ETHER_ADDR_LEN];
2660 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2662 memset(addr, 0, sizeof(addr));
2664 e1000_rar_set(hw, addr, index);
2668 eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
2669 struct ether_addr *addr)
2671 eth_igb_rar_clear(dev, 0);
2673 eth_igb_rar_set(dev, (void *)addr, 0, 0);
2676 * Virtual Function operations
2679 igbvf_intr_disable(struct e1000_hw *hw)
2681 PMD_INIT_FUNC_TRACE();
2683 /* Clear interrupt mask to stop from interrupts being generated */
2684 E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF);
2686 E1000_WRITE_FLUSH(hw);
2690 igbvf_stop_adapter(struct rte_eth_dev *dev)
2694 struct rte_eth_dev_info dev_info;
2695 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2697 memset(&dev_info, 0, sizeof(dev_info));
2698 eth_igbvf_infos_get(dev, &dev_info);
2700 /* Clear interrupt mask to stop from interrupts being generated */
2701 igbvf_intr_disable(hw);
2703 /* Clear any pending interrupts, flush previous writes */
2704 E1000_READ_REG(hw, E1000_EICR);
2706 /* Disable the transmit unit. Each queue must be disabled. */
2707 for (i = 0; i < dev_info.max_tx_queues; i++)
2708 E1000_WRITE_REG(hw, E1000_TXDCTL(i), E1000_TXDCTL_SWFLSH);
2710 /* Disable the receive unit by stopping each queue */
2711 for (i = 0; i < dev_info.max_rx_queues; i++) {
2712 reg_val = E1000_READ_REG(hw, E1000_RXDCTL(i));
2713 reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE;
2714 E1000_WRITE_REG(hw, E1000_RXDCTL(i), reg_val);
2715 while (E1000_READ_REG(hw, E1000_RXDCTL(i)) & E1000_RXDCTL_QUEUE_ENABLE)
2719 /* Flush the queue-disable writes */
2720 E1000_WRITE_FLUSH(hw);
2724 static int eth_igbvf_link_update(struct e1000_hw *hw)
2726 struct e1000_mbx_info *mbx = &hw->mbx;
2727 struct e1000_mac_info *mac = &hw->mac;
2728 int ret_val = E1000_SUCCESS;
2730 PMD_INIT_LOG(DEBUG, "e1000_check_for_link_vf");
2733 * We only want to run this if a reset has been asserted;
2734 * in that case it could mean a link change, a device reset,
2735 * or a virtual function reset.
2738 /* If we were hit with a reset or timeout drop the link */
2739 if (!e1000_check_for_rst(hw, 0) || !mbx->timeout)
2740 mac->get_link_status = TRUE;
2742 if (!mac->get_link_status)
2745 /* If the link is down, there is no point in checking whether the PF is up */
2746 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
2749 /* if we passed all the tests above then the link is up and we no
2750 * longer need to check for link */
2751 mac->get_link_status = FALSE;
2759 igbvf_dev_configure(struct rte_eth_dev *dev)
2761 struct rte_eth_conf* conf = &dev->data->dev_conf;
2763 PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
2764 dev->data->port_id);
2767 * The VF has no ability to enable/disable HW CRC stripping;
2768 * keep the behavior consistent with the host PF.
2770 #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
2771 if (!conf->rxmode.hw_strip_crc) {
2772 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
2773 conf->rxmode.hw_strip_crc = 1;
2776 if (conf->rxmode.hw_strip_crc) {
2777 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
2778 conf->rxmode.hw_strip_crc = 0;
2786 igbvf_dev_start(struct rte_eth_dev *dev)
2788 struct e1000_hw *hw =
2789 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2790 struct e1000_adapter *adapter =
2791 E1000_DEV_PRIVATE(dev->data->dev_private);
2794 PMD_INIT_FUNC_TRACE();
2796 hw->mac.ops.reset_hw(hw);
2797 adapter->stopped = 0;
2800 igbvf_set_vfta_all(dev,1);
2802 eth_igbvf_tx_init(dev);
2804 /* This can fail when allocating mbufs for descriptor rings */
2805 ret = eth_igbvf_rx_init(dev);
2807 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
2808 igb_dev_clear_queues(dev);
2816 igbvf_dev_stop(struct rte_eth_dev *dev)
2818 PMD_INIT_FUNC_TRACE();
2820 igbvf_stop_adapter(dev);
2823 * Clear what we set, but keep shadow_vfta so it can be
2824 * restored after the device starts.
2826 igbvf_set_vfta_all(dev,0);
2828 igb_dev_clear_queues(dev);
2832 igbvf_dev_close(struct rte_eth_dev *dev)
2834 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2835 struct e1000_adapter *adapter =
2836 E1000_DEV_PRIVATE(dev->data->dev_private);
2838 PMD_INIT_FUNC_TRACE();
2842 igbvf_dev_stop(dev);
2843 adapter->stopped = 1;
2844 igb_dev_free_queues(dev);
2847 static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on)
2849 struct e1000_mbx_info *mbx = &hw->mbx;
2853 /* After setting a VLAN, VLAN stripping is also enabled in the igb driver */
2854 msgbuf[0] = E1000_VF_SET_VLAN;
2856 /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
2858 msgbuf[0] |= E1000_VF_SET_VLAN_ADD;
2860 err = mbx->ops.write_posted(hw, msgbuf, 2, 0);
2864 err = mbx->ops.read_posted(hw, msgbuf, 2, 0);
2868 msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
2869 if (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK))
2876 static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on)
2878 struct e1000_hw *hw =
2879 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2880 struct e1000_vfta * shadow_vfta =
2881 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2882 int i = 0, j = 0, vfta = 0, mask = 1;
2884 for (i = 0; i < IGB_VFTA_SIZE; i++){
2885 vfta = shadow_vfta->vfta[i];
2888 for (j = 0; j < 32; j++){
2891 (uint16_t)((i<<5)+j), on);
2900 igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2902 struct e1000_hw *hw =
2903 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2904 struct e1000_vfta * shadow_vfta =
2905 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2906 uint32_t vid_idx = 0;
2907 uint32_t vid_bit = 0;
2910 PMD_INIT_FUNC_TRACE();
2912 /* vind is not used in the VF driver; set it to 0 (see ixgbe_set_vfta_vf) */
2913 ret = igbvf_set_vfta(hw, vlan_id, !!on);
2915 PMD_INIT_LOG(ERR, "Unable to set VF vlan");
2918 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
2919 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
2921 /* Save what we set and restore it after a device reset */
2923 shadow_vfta->vfta[vid_idx] |= vid_bit;
2925 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
2931 igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr)
2933 struct e1000_hw *hw =
2934 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2936 /* index is not used by rar_set() */
2937 hw->mac.ops.rar_set(hw, (void *)addr, 0);
2942 eth_igb_rss_reta_update(struct rte_eth_dev *dev,
2943 struct rte_eth_rss_reta_entry64 *reta_conf,
2948 uint16_t idx, shift;
2949 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2951 if (reta_size != ETH_RSS_RETA_SIZE_128) {
2952 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2953 "(%d) doesn't match the number the hardware can support "
2954 "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
2958 for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
2959 idx = i / RTE_RETA_GROUP_SIZE;
2960 shift = i % RTE_RETA_GROUP_SIZE;
2961 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2965 if (mask == IGB_4_BIT_MASK)
2968 r = E1000_READ_REG(hw, E1000_RETA(i >> 2));
2969 for (j = 0, reta = 0; j < IGB_4_BIT_WIDTH; j++) {
2970 if (mask & (0x1 << j))
2971 reta |= reta_conf[idx].reta[shift + j] <<
2974 reta |= r & (IGB_8_BIT_MASK << (CHAR_BIT * j));
2976 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta);
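/*
 * The 128-entry redirection table is packed four 8-bit entries per 32-bit
 * RETA register, which is why the loop above advances IGB_4_BIT_WIDTH (4)
 * entries at a time and addresses register i >> 2. Usage sketch
 * (illustrative only, not part of the driver): spread all 128 entries
 * round-robin over 4 queues; the helper name and port id are assumptions.
 */
static __rte_unused void
igb_reta_usage_example(uint8_t port_id)
{
	struct rte_eth_rss_reta_entry64 conf[ETH_RSS_RETA_SIZE_128 /
					     RTE_RETA_GROUP_SIZE];
	unsigned i;

	memset(conf, 0, sizeof(conf));
	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i++) {
		conf[i / RTE_RETA_GROUP_SIZE].mask |=
			1ULL << (i % RTE_RETA_GROUP_SIZE);
		conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
			i % 4; /* target queue */
	}
	(void)rte_eth_dev_rss_reta_update(port_id, conf,
					  ETH_RSS_RETA_SIZE_128);
}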
2983 eth_igb_rss_reta_query(struct rte_eth_dev *dev,
2984 struct rte_eth_rss_reta_entry64 *reta_conf,
2989 uint16_t idx, shift;
2990 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2992 if (reta_size != ETH_RSS_RETA_SIZE_128) {
2993 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2994 "(%d) doesn't match the number the hardware can support "
2995 "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
2999 for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
3000 idx = i / RTE_RETA_GROUP_SIZE;
3001 shift = i % RTE_RETA_GROUP_SIZE;
3002 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
3006 reta = E1000_READ_REG(hw, E1000_RETA(i >> 2));
3007 for (j = 0; j < IGB_4_BIT_WIDTH; j++) {
3008 if (mask & (0x1 << j))
3009 reta_conf[idx].reta[shift + j] =
3010 ((reta >> (CHAR_BIT * j)) &
3018 #define MAC_TYPE_FILTER_SUP(type) do {\
3019 if ((type) != e1000_82580 && (type) != e1000_i350 &&\
3020 (type) != e1000_82576)\
3025 eth_igb_syn_filter_set(struct rte_eth_dev *dev,
3026 struct rte_eth_syn_filter *filter,
3029 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3030 uint32_t synqf, rfctl;
3032 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
3035 synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
3038 if (synqf & E1000_SYN_FILTER_ENABLE)
3041 synqf = (uint32_t)(((filter->queue << E1000_SYN_FILTER_QUEUE_SHIFT) &
3042 E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE);
3044 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
3045 if (filter->hig_pri)
3046 rfctl |= E1000_RFCTL_SYNQFP;
3048 rfctl &= ~E1000_RFCTL_SYNQFP;
3050 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
3052 if (!(synqf & E1000_SYN_FILTER_ENABLE))
3057 E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
3058 E1000_WRITE_FLUSH(hw);
3063 eth_igb_syn_filter_get(struct rte_eth_dev *dev,
3064 struct rte_eth_syn_filter *filter)
3066 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3067 uint32_t synqf, rfctl;
3069 synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
3070 if (synqf & E1000_SYN_FILTER_ENABLE) {
3071 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
3072 filter->hig_pri = (rfctl & E1000_RFCTL_SYNQFP) ? 1 : 0;
3073 filter->queue = (uint8_t)((synqf & E1000_SYN_FILTER_QUEUE) >>
3074 E1000_SYN_FILTER_QUEUE_SHIFT);
3082 eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
3083 enum rte_filter_op filter_op,
3086 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3089 MAC_TYPE_FILTER_SUP(hw->mac.type);
3091 if (filter_op == RTE_ETH_FILTER_NOP)
3095 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
3100 switch (filter_op) {
3101 case RTE_ETH_FILTER_ADD:
3102 ret = eth_igb_syn_filter_set(dev,
3103 (struct rte_eth_syn_filter *)arg,
3106 case RTE_ETH_FILTER_DELETE:
3107 ret = eth_igb_syn_filter_set(dev,
3108 (struct rte_eth_syn_filter *)arg,
3111 case RTE_ETH_FILTER_GET:
3112 ret = eth_igb_syn_filter_get(dev,
3113 (struct rte_eth_syn_filter *)arg);
3116 PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
3124 #define MAC_TYPE_FILTER_SUP_EXT(type) do {\
3125 if ((type) != e1000_82580 && (type) != e1000_i350)\
3129 /* Translate elements of struct rte_eth_ntuple_filter into struct e1000_2tuple_filter_info */
3131 ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter,
3132 struct e1000_2tuple_filter_info *filter_info)
3134 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
3136 if (filter->priority > E1000_2TUPLE_MAX_PRI)
3137 return -EINVAL; /* priority is out of range. */
3138 if (filter->tcp_flags > TCP_FLAG_ALL)
3139 return -EINVAL; /* flags are invalid. */
3141 switch (filter->dst_port_mask) {
3143 filter_info->dst_port_mask = 0;
3144 filter_info->dst_port = filter->dst_port;
3147 filter_info->dst_port_mask = 1;
3150 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
3154 switch (filter->proto_mask) {
3156 filter_info->proto_mask = 0;
3157 filter_info->proto = filter->proto;
3160 filter_info->proto_mask = 1;
3163 PMD_DRV_LOG(ERR, "invalid protocol mask.");
3167 filter_info->priority = (uint8_t)filter->priority;
3168 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
3169 filter_info->tcp_flags = filter->tcp_flags;
3171 filter_info->tcp_flags = 0;
3176 static inline struct e1000_2tuple_filter *
3177 igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list,
3178 struct e1000_2tuple_filter_info *key)
3180 struct e1000_2tuple_filter *it;
3182 TAILQ_FOREACH(it, filter_list, entries) {
3183 if (memcmp(key, &it->filter_info,
3184 sizeof(struct e1000_2tuple_filter_info)) == 0) {
3192 * igb_add_2tuple_filter - add a 2tuple filter
3195 * dev: Pointer to struct rte_eth_dev.
3196 * ntuple_filter: pointer to the filter that will be added.
3199 * - On success, zero.
3200 * - On failure, a negative value.
3203 igb_add_2tuple_filter(struct rte_eth_dev *dev,
3204 struct rte_eth_ntuple_filter *ntuple_filter)
3206 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3207 struct e1000_filter_info *filter_info =
3208 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3209 struct e1000_2tuple_filter *filter;
3210 uint32_t ttqf = E1000_TTQF_DISABLE_MASK;
3211 uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP;
3214 filter = rte_zmalloc("e1000_2tuple_filter",
3215 sizeof(struct e1000_2tuple_filter), 0);
3219 ret = ntuple_filter_to_2tuple(ntuple_filter,
3220 &filter->filter_info);
3225 if (igb_2tuple_filter_lookup(&filter_info->twotuple_list,
3226 &filter->filter_info) != NULL) {
3227 PMD_DRV_LOG(ERR, "filter exists.");
3231 filter->queue = ntuple_filter->queue;
3234 * look for an unused 2tuple filter index,
3235 * and insert the filter into the list.
3237 for (i = 0; i < E1000_MAX_TTQF_FILTERS; i++) {
3238 if (!(filter_info->twotuple_mask & (1 << i))) {
3239 filter_info->twotuple_mask |= 1 << i;
3241 TAILQ_INSERT_TAIL(&filter_info->twotuple_list,
3247 if (i >= E1000_MAX_TTQF_FILTERS) {
3248 PMD_DRV_LOG(ERR, "2tuple filters are full.");
3253 imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
3254 if (filter->filter_info.dst_port_mask == 1) /* 1b means don't compare. */
3255 imir |= E1000_IMIR_PORT_BP;
3257 imir &= ~E1000_IMIR_PORT_BP;
3259 imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
3261 ttqf |= E1000_TTQF_QUEUE_ENABLE;
3262 ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT);
3263 ttqf |= (uint32_t)(filter->filter_info.proto & E1000_TTQF_PROTOCOL_MASK);
3264 if (filter->filter_info.proto_mask == 0)
3265 ttqf &= ~E1000_TTQF_MASK_ENABLE;
3267 /* TCP flag bits setting. */
3268 if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
3269 if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
3270 imir_ext |= E1000_IMIREXT_CTRL_URG;
3271 if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
3272 imir_ext |= E1000_IMIREXT_CTRL_ACK;
3273 if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
3274 imir_ext |= E1000_IMIREXT_CTRL_PSH;
3275 if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
3276 imir_ext |= E1000_IMIREXT_CTRL_RST;
3277 if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
3278 imir_ext |= E1000_IMIREXT_CTRL_SYN;
3279 if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
3280 imir_ext |= E1000_IMIREXT_CTRL_FIN;
3282 imir_ext |= E1000_IMIREXT_CTRL_BP;
3283 E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
3284 E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf);
3285 E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
3290 * igb_remove_2tuple_filter - remove a 2tuple filter
3293 * dev: Pointer to struct rte_eth_dev.
3294 * ntuple_filter: pointer to the filter that will be removed.
3297 * - On success, zero.
3298 * - On failure, a negative value.
3301 igb_remove_2tuple_filter(struct rte_eth_dev *dev,
3302 struct rte_eth_ntuple_filter *ntuple_filter)
3304 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3305 struct e1000_filter_info *filter_info =
3306 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3307 struct e1000_2tuple_filter_info filter_2tuple;
3308 struct e1000_2tuple_filter *filter;
3311 memset(&filter_2tuple, 0, sizeof(struct e1000_2tuple_filter_info));
3312 ret = ntuple_filter_to_2tuple(ntuple_filter,
3317 filter = igb_2tuple_filter_lookup(&filter_info->twotuple_list,
3319 if (filter == NULL) {
3320 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3324 filter_info->twotuple_mask &= ~(1 << filter->index);
3325 TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries);
3328 E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK);
3329 E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
3330 E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
3334 static inline struct e1000_flex_filter *
3335 eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list,
3336 struct e1000_flex_filter_info *key)
3338 struct e1000_flex_filter *it;
3340 TAILQ_FOREACH(it, filter_list, entries) {
3341 if (memcmp(key, &it->filter_info,
3342 sizeof(struct e1000_flex_filter_info)) == 0)
3350 eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
3351 struct rte_eth_flex_filter *filter,
3354 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3355 struct e1000_filter_info *filter_info =
3356 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3357 struct e1000_flex_filter *flex_filter, *it;
3358 uint32_t wufc, queueing, mask;
3360 uint8_t shift, i, j = 0;
3362 flex_filter = rte_zmalloc("e1000_flex_filter",
3363 sizeof(struct e1000_flex_filter), 0);
3364 if (flex_filter == NULL)
3367 flex_filter->filter_info.len = filter->len;
3368 flex_filter->filter_info.priority = filter->priority;
3369 memcpy(flex_filter->filter_info.dwords, filter->bytes, filter->len);
3370 for (i = 0; i < RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT; i++) {
3372 /* Reverse the bits in the flex filter's mask */
3373 for (shift = 0; shift < CHAR_BIT; shift++) {
3374 if (filter->mask[i] & (0x01 << shift))
3375 mask |= (0x80 >> shift);
3377 flex_filter->filter_info.mask[i] = mask;
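/*
 * Worked example of the bit reversal above: a user mask byte of 0x01
 * (bit 0 set) becomes 0x80 (bit 7 set) in the hardware's per-filter
 * mask layout.
 */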
3380 wufc = E1000_READ_REG(hw, E1000_WUFC);
3381 if (flex_filter->index < E1000_MAX_FHFT)
3382 reg_off = E1000_FHFT(flex_filter->index);
3384 reg_off = E1000_FHFT_EXT(flex_filter->index - E1000_MAX_FHFT);
3387 if (eth_igb_flex_filter_lookup(&filter_info->flex_list,
3388 &flex_filter->filter_info) != NULL) {
3389 PMD_DRV_LOG(ERR, "filter exists.");
3390 rte_free(flex_filter);
3393 flex_filter->queue = filter->queue;
3395 * look for an unused flex filter index
3396 * and insert the filter into the list.
3398 for (i = 0; i < E1000_MAX_FLEX_FILTERS; i++) {
3399 if (!(filter_info->flex_mask & (1 << i))) {
3400 filter_info->flex_mask |= 1 << i;
3401 flex_filter->index = i;
3402 TAILQ_INSERT_TAIL(&filter_info->flex_list,
3408 if (i >= E1000_MAX_FLEX_FILTERS) {
3409 PMD_DRV_LOG(ERR, "flex filters are full.");
3410 rte_free(flex_filter);
3414 E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
3415 (E1000_WUFC_FLX0 << flex_filter->index));
3416 queueing = filter->len |
3417 (filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
3418 (filter->priority << E1000_FHFT_QUEUEING_PRIO_SHIFT);
3419 E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET,
3421 for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) {
3422 E1000_WRITE_REG(hw, reg_off,
3423 flex_filter->filter_info.dwords[j]);
3424 reg_off += sizeof(uint32_t);
3425 E1000_WRITE_REG(hw, reg_off,
3426 flex_filter->filter_info.dwords[++j]);
3427 reg_off += sizeof(uint32_t);
3428 E1000_WRITE_REG(hw, reg_off,
3429 (uint32_t)flex_filter->filter_info.mask[i]);
3430 reg_off += sizeof(uint32_t) * 2;
3434 it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
3435 &flex_filter->filter_info);
3437 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3438 rte_free(flex_filter);
3442 for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
3443 E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);
3444 E1000_WRITE_REG(hw, E1000_WUFC, wufc &
3445 (~(E1000_WUFC_FLX0 << it->index)));
3447 filter_info->flex_mask &= ~(1 << it->index);
3448 TAILQ_REMOVE(&filter_info->flex_list, it, entries);
3450 rte_free(flex_filter);
3457 eth_igb_get_flex_filter(struct rte_eth_dev *dev,
3458 struct rte_eth_flex_filter *filter)
3460 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3461 struct e1000_filter_info *filter_info =
3462 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3463 struct e1000_flex_filter flex_filter, *it;
3464 uint32_t wufc, queueing, wufc_en = 0;
3466 memset(&flex_filter, 0, sizeof(struct e1000_flex_filter));
3467 flex_filter.filter_info.len = filter->len;
3468 flex_filter.filter_info.priority = filter->priority;
3469 memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len);
3470 memcpy(flex_filter.filter_info.mask, filter->mask,
3471 RTE_ALIGN(filter->len, sizeof(char)) / sizeof(char));
3473 it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
3474 &flex_filter.filter_info);
3476 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3480 wufc = E1000_READ_REG(hw, E1000_WUFC);
3481 wufc_en = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << it->index);
3483 if ((wufc & wufc_en) == wufc_en) {
3484 uint32_t reg_off = 0;
3485 if (it->index < E1000_MAX_FHFT)
3486 reg_off = E1000_FHFT(it->index);
3488 reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);
3490 queueing = E1000_READ_REG(hw,
3491 reg_off + E1000_FHFT_QUEUEING_OFFSET);
3492 filter->len = queueing & E1000_FHFT_QUEUEING_LEN;
3493 filter->priority = (queueing & E1000_FHFT_QUEUEING_PRIO) >>
3494 E1000_FHFT_QUEUEING_PRIO_SHIFT;
3495 filter->queue = (queueing & E1000_FHFT_QUEUEING_QUEUE) >>
3496 E1000_FHFT_QUEUEING_QUEUE_SHIFT;
3503 eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
3504 enum rte_filter_op filter_op,
3507 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3508 struct rte_eth_flex_filter *filter;
3511 MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
3513 if (filter_op == RTE_ETH_FILTER_NOP)
3517 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
3522 filter = (struct rte_eth_flex_filter *)arg;
3523 if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN
3524 || filter->len % sizeof(uint64_t) != 0) {
3525 PMD_DRV_LOG(ERR, "filter's length is out of range");
3528 if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
3529 PMD_DRV_LOG(ERR, "filter's priority is out of range");
3533 switch (filter_op) {
3534 case RTE_ETH_FILTER_ADD:
3535 ret = eth_igb_add_del_flex_filter(dev, filter, TRUE);
3537 case RTE_ETH_FILTER_DELETE:
3538 ret = eth_igb_add_del_flex_filter(dev, filter, FALSE);
3540 case RTE_ETH_FILTER_GET:
3541 ret = eth_igb_get_flex_filter(dev, filter);
3544 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
3552 /* Translate elements of struct rte_eth_ntuple_filter into struct e1000_5tuple_filter_info */
3554 ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter,
3555 struct e1000_5tuple_filter_info *filter_info)
3557 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576)
3559 if (filter->priority > E1000_2TUPLE_MAX_PRI)
3560 return -EINVAL; /* priority is out of range. */
3561 if (filter->tcp_flags > TCP_FLAG_ALL)
3562 return -EINVAL; /* flags are invalid. */
3564 switch (filter->dst_ip_mask) {
3566 filter_info->dst_ip_mask = 0;
3567 filter_info->dst_ip = filter->dst_ip;
3570 filter_info->dst_ip_mask = 1;
3573 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
3577 switch (filter->src_ip_mask) {
3579 filter_info->src_ip_mask = 0;
3580 filter_info->src_ip = filter->src_ip;
3583 filter_info->src_ip_mask = 1;
3586 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
3590 switch (filter->dst_port_mask) {
3592 filter_info->dst_port_mask = 0;
3593 filter_info->dst_port = filter->dst_port;
3596 filter_info->dst_port_mask = 1;
3599 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
3603 switch (filter->src_port_mask) {
3605 filter_info->src_port_mask = 0;
3606 filter_info->src_port = filter->src_port;
3609 filter_info->src_port_mask = 1;
3612 PMD_DRV_LOG(ERR, "invalid src_port mask.");
3616 switch (filter->proto_mask) {
3618 filter_info->proto_mask = 0;
3619 filter_info->proto = filter->proto;
3622 filter_info->proto_mask = 1;
3625 PMD_DRV_LOG(ERR, "invalid protocol mask.");
3629 filter_info->priority = (uint8_t)filter->priority;
3630 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
3631 filter_info->tcp_flags = filter->tcp_flags;
3633 filter_info->tcp_flags = 0;
3638 static inline struct e1000_5tuple_filter *
3639 igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list,
3640 struct e1000_5tuple_filter_info *key)
3642 struct e1000_5tuple_filter *it;
3644 TAILQ_FOREACH(it, filter_list, entries) {
3645 if (memcmp(key, &it->filter_info,
3646 sizeof(struct e1000_5tuple_filter_info)) == 0) {
3654 * igb_add_5tuple_filter_82576 - add a 5tuple filter
3657 * dev: Pointer to struct rte_eth_dev.
3658 * ntuple_filter: pointer to the filter that will be added.
3661 * - On success, zero.
3662 * - On failure, a negative value.
3665 igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
3666 struct rte_eth_ntuple_filter *ntuple_filter)
3668 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3669 struct e1000_filter_info *filter_info =
3670 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3671 struct e1000_5tuple_filter *filter;
3672 uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK;
3673 uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP;
3677 filter = rte_zmalloc("e1000_5tuple_filter",
3678 sizeof(struct e1000_5tuple_filter), 0);
3682 ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
3683 &filter->filter_info);
3689 if (igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
3690 &filter->filter_info) != NULL) {
3691 PMD_DRV_LOG(ERR, "filter exists.");
3695 filter->queue = ntuple_filter->queue;
3698 * look for an unused 5tuple filter index,
3699 * and insert the filter into the list.
3701 for (i = 0; i < E1000_MAX_FTQF_FILTERS; i++) {
3702 if (!(filter_info->fivetuple_mask & (1 << i))) {
3703 filter_info->fivetuple_mask |= 1 << i;
3705 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
3711 if (i >= E1000_MAX_FTQF_FILTERS) {
3712 PMD_DRV_LOG(ERR, "5tuple filters are full.");
3717 ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK;
3718 if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. */
3719 ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP;
3720 if (filter->filter_info.dst_ip_mask == 0)
3721 ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP;
3722 if (filter->filter_info.src_port_mask == 0)
3723 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
3724 if (filter->filter_info.proto_mask == 0)
3725 ftqf &= ~E1000_FTQF_MASK_PROTO_BP;
3726 ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) &
3727 E1000_FTQF_QUEUE_MASK;
3728 ftqf |= E1000_FTQF_QUEUE_ENABLE;
3729 E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf);
3730 E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip);
3731 E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip);
3733 spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT;
3734 E1000_WRITE_REG(hw, E1000_SPQF(i), spqf);
3736 imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
3737 if (filter->filter_info.dst_port_mask == 1) /* 1b means don't compare. */
3738 imir |= E1000_IMIR_PORT_BP;
3740 imir &= ~E1000_IMIR_PORT_BP;
3741 imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
3743 /* TCP flag bits setting. */
3744 if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
3745 if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
3746 imir_ext |= E1000_IMIREXT_CTRL_URG;
3747 if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
3748 imir_ext |= E1000_IMIREXT_CTRL_ACK;
3749 if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
3750 imir_ext |= E1000_IMIREXT_CTRL_PSH;
3751 if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
3752 imir_ext |= E1000_IMIREXT_CTRL_RST;
3753 if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
3754 imir_ext |= E1000_IMIREXT_CTRL_SYN;
3755 if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
3756 imir_ext |= E1000_IMIREXT_CTRL_FIN;
3758 imir_ext |= E1000_IMIREXT_CTRL_BP;
3759 E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
3760 E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
3765 * igb_remove_5tuple_filter_82576 - remove a 5tuple filter
3768 * dev: Pointer to struct rte_eth_dev.
3769 * ntuple_filter: pointer to the filter that will be removed.
3772 * - On success, zero.
3773 * - On failure, a negative value.
3776 igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
3777 struct rte_eth_ntuple_filter *ntuple_filter)
3779 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3780 struct e1000_filter_info *filter_info =
3781 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3782 struct e1000_5tuple_filter_info filter_5tuple;
3783 struct e1000_5tuple_filter *filter;
3786 memset(&filter_5tuple, 0, sizeof(struct e1000_5tuple_filter_info));
3787 ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
3792 filter = igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
3794 if (filter == NULL) {
3795 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3799 filter_info->fivetuple_mask &= ~(1 << filter->index);
3800 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
3803 E1000_WRITE_REG(hw, E1000_FTQF(filter->index),
3804 E1000_FTQF_VF_BP | E1000_FTQF_MASK);
3805 E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0);
3806 E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0);
3807 E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0);
3808 E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
3809 E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
3814 eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3817 struct e1000_hw *hw;
3818 struct rte_eth_dev_info dev_info;
3819 uint32_t frame_size = mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN +
3822 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3824 #ifdef RTE_LIBRTE_82571_SUPPORT
3825 /* XXX: not bigger than max_rx_pktlen */
3826 if (hw->mac.type == e1000_82571)
3829 eth_igb_infos_get(dev, &dev_info);
3831 /* check that mtu is within the allowed range */
3832 if ((mtu < ETHER_MIN_MTU) ||
3833 (frame_size > dev_info.max_rx_pktlen))
3836 /* Refuse an MTU that requires scattered-packet support when this
3837 * feature has not been enabled beforehand. */
3838 if (!dev->data->scattered_rx &&
3839 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
3842 rctl = E1000_READ_REG(hw, E1000_RCTL);
3844 /* switch to jumbo mode if needed */
3845 if (frame_size > ETHER_MAX_LEN) {
3846 dev->data->dev_conf.rxmode.jumbo_frame = 1;
3847 rctl |= E1000_RCTL_LPE;
3849 dev->data->dev_conf.rxmode.jumbo_frame = 0;
3850 rctl &= ~E1000_RCTL_LPE;
3852 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
3854 /* update max frame size */
3855 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
3857 E1000_WRITE_REG(hw, E1000_RLPML,
3858 dev->data->dev_conf.rxmode.max_rx_pkt_len);
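/*
 * Worked example: a standard 1500-byte MTU yields a frame size of at least
 * 1500 + 14 (Ethernet header) + 4 (CRC) = 1518 bytes (ETHER_MAX_LEN);
 * frame sizes above that switch the port into jumbo mode above. Usage
 * sketch (illustrative only, not part of the driver); the helper name and
 * the 9000-byte MTU are assumptions of the example.
 */
static __rte_unused void
igb_mtu_usage_example(uint8_t port_id)
{
	if (rte_eth_dev_set_mtu(port_id, 9000) != 0)
		PMD_DRV_LOG(ERR,
			    "9000-byte MTU rejected (range or scatter limits)");
}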
3864 * igb_add_del_ntuple_filter - add or delete an ntuple filter
3867 * dev: Pointer to struct rte_eth_dev.
3868 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
3869 * add: if true, add the filter; if false, remove it
3872 * - On success, zero.
3873 * - On failure, a negative value.
3876 igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
3877 struct rte_eth_ntuple_filter *ntuple_filter,
3880 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3883 switch (ntuple_filter->flags) {
3884 case RTE_5TUPLE_FLAGS:
3885 case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
3886 if (hw->mac.type != e1000_82576)
3889 ret = igb_add_5tuple_filter_82576(dev,
3892 ret = igb_remove_5tuple_filter_82576(dev,
3895 case RTE_2TUPLE_FLAGS:
3896 case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
3897 if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
3900 ret = igb_add_2tuple_filter(dev, ntuple_filter);
3902 ret = igb_remove_2tuple_filter(dev, ntuple_filter);
3913 * igb_get_ntuple_filter - get an ntuple filter
3916 * dev: Pointer to struct rte_eth_dev.
3917 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
3920 * - On success, zero.
3921 * - On failure, a negative value.
3924 igb_get_ntuple_filter(struct rte_eth_dev *dev,
3925 struct rte_eth_ntuple_filter *ntuple_filter)
3927 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3928 struct e1000_filter_info *filter_info =
3929 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3930 struct e1000_5tuple_filter_info filter_5tuple;
3931 struct e1000_2tuple_filter_info filter_2tuple;
3932 struct e1000_5tuple_filter *p_5tuple_filter;
3933 struct e1000_2tuple_filter *p_2tuple_filter;
3936 switch (ntuple_filter->flags) {
3937 case RTE_5TUPLE_FLAGS:
3938 case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
3939 if (hw->mac.type != e1000_82576)
3941 memset(&filter_5tuple,
3943 sizeof(struct e1000_5tuple_filter_info));
3944 ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
3948 p_5tuple_filter = igb_5tuple_filter_lookup_82576(
3949 &filter_info->fivetuple_list,
3951 if (p_5tuple_filter == NULL) {
3952 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3955 ntuple_filter->queue = p_5tuple_filter->queue;
3957 case RTE_2TUPLE_FLAGS:
3958 case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
3959 if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
3961 memset(&filter_2tuple,
3963 sizeof(struct e1000_2tuple_filter_info));
3964 ret = ntuple_filter_to_2tuple(ntuple_filter, &filter_2tuple);
3967 p_2tuple_filter = igb_2tuple_filter_lookup(
3968 &filter_info->twotuple_list,
3970 if (p_2tuple_filter == NULL) {
3971 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3974 ntuple_filter->queue = p_2tuple_filter->queue;
3985 * igb_ntuple_filter_handle - Handle operations for ntuple filter.
3986 * @dev: pointer to rte_eth_dev structure
3987 * @filter_op: operation to be taken.
3988 * @arg: a pointer to specific structure corresponding to the filter_op
3991 igb_ntuple_filter_handle(struct rte_eth_dev *dev,
3992 enum rte_filter_op filter_op,
3995 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3998 MAC_TYPE_FILTER_SUP(hw->mac.type);
4000 if (filter_op == RTE_ETH_FILTER_NOP)
4004 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
4009 switch (filter_op) {
4010 case RTE_ETH_FILTER_ADD:
4011 ret = igb_add_del_ntuple_filter(dev,
4012 (struct rte_eth_ntuple_filter *)arg,
4015 case RTE_ETH_FILTER_DELETE:
4016 ret = igb_add_del_ntuple_filter(dev,
4017 (struct rte_eth_ntuple_filter *)arg,
4020 case RTE_ETH_FILTER_GET:
4021 ret = igb_get_ntuple_filter(dev,
4022 (struct rte_eth_ntuple_filter *)arg);
4025 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
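/*
 * Usage sketch (illustrative only, not part of the driver): steer TCP
 * traffic for 10.0.0.5:80 into queue 1 with a 5-tuple filter (82576 only).
 * The helper name, the address/port values and the network byte order of
 * the tuple fields are assumptions of the example.
 */
static __rte_unused void
igb_ntuple_usage_example(uint8_t port_id)
{
	struct rte_eth_ntuple_filter f;

	memset(&f, 0, sizeof(f));
	f.flags = RTE_5TUPLE_FLAGS;
	f.dst_ip = rte_cpu_to_be_32((10 << 24) | 5);	/* 10.0.0.5 */
	f.dst_ip_mask = UINT32_MAX;	/* compare dst_ip */
	f.dst_port = rte_cpu_to_be_16(80);
	f.dst_port_mask = UINT16_MAX;	/* compare dst_port */
	f.proto = 6;			/* TCP */
	f.proto_mask = UINT8_MAX;	/* compare proto */
	f.priority = 1;
	f.queue = 1;
	(void)rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
				      RTE_ETH_FILTER_ADD, &f);
}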
4033 igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info,
4038 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
4039 if (filter_info->ethertype_filters[i] == ethertype &&
4040 (filter_info->ethertype_mask & (1 << i)))
4047 igb_ethertype_filter_insert(struct e1000_filter_info *filter_info,
4052 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
4053 if (!(filter_info->ethertype_mask & (1 << i))) {
4054 filter_info->ethertype_mask |= 1 << i;
4055 filter_info->ethertype_filters[i] = ethertype;
4063 igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
4066 if (idx >= E1000_MAX_ETQF_FILTERS)
4068 filter_info->ethertype_mask &= ~(1 << idx);
4069 filter_info->ethertype_filters[idx] = 0;
4075 igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
4076 struct rte_eth_ethertype_filter *filter,
4079 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4080 struct e1000_filter_info *filter_info =
4081 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4085 if (filter->ether_type == ETHER_TYPE_IPv4 ||
4086 filter->ether_type == ETHER_TYPE_IPv6) {
4087 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
4088 " ethertype filter.", filter->ether_type);
4092 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
4093 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
4096 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
4097 PMD_DRV_LOG(ERR, "drop option is unsupported.");
4101 ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
4102 if (ret >= 0 && add) {
4103 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
4104 filter->ether_type);
4107 if (ret < 0 && !add) {
4108 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
4109 filter->ether_type);
4114 ret = igb_ethertype_filter_insert(filter_info,
4115 filter->ether_type);
4117 PMD_DRV_LOG(ERR, "ethertype filters are full.");
4121 etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
4122 etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE);
4123 etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT;
4125 ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret);
4129 E1000_WRITE_REG(hw, E1000_ETQF(ret), etqf);
4130 E1000_WRITE_FLUSH(hw);
static int
igb_get_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	uint32_t etqf;
	int ret;

	ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
			    filter->ether_type);
		return -ENOENT;
	}

	etqf = E1000_READ_REG(hw, E1000_ETQF(ret));
	if (etqf & E1000_ETQF_FILTER_ENABLE) {
		filter->ether_type = etqf & E1000_ETQF_ETHERTYPE;
		filter->flags = 0;
		filter->queue = (etqf & E1000_ETQF_QUEUE) >>
				E1000_ETQF_QUEUE_SHIFT;
		return 0;
	}

	return -ENOENT;
}

/*
 * igb_ethertype_filter_handle - Handle operations for ethertype filter.
 * @dev: pointer to rte_eth_dev structure
 * @filter_op: the operation to be performed
 * @arg: a pointer to the structure corresponding to the filter_op
 */
static int
igb_ethertype_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL) {
		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
			    filter_op);
		return -EINVAL;
	}

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = igb_add_del_ethertype_filter(dev,
			(struct rte_eth_ethertype_filter *)arg,
			TRUE);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = igb_add_del_ethertype_filter(dev,
			(struct rte_eth_ethertype_filter *)arg,
			FALSE);
		break;
	case RTE_ETH_FILTER_GET:
		ret = igb_get_ethertype_filter(dev,
			(struct rte_eth_ethertype_filter *)arg);
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int
eth_igb_filter_ctrl(struct rte_eth_dev *dev,
		    enum rte_filter_type filter_type,
		    enum rte_filter_op filter_op,
		    void *arg)
{
	int ret = -EINVAL;

	switch (filter_type) {
	case RTE_ETH_FILTER_NTUPLE:
		ret = igb_ntuple_filter_handle(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		ret = igb_ethertype_filter_handle(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_SYN:
		ret = eth_igb_syn_filter_handle(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_FLEXIBLE:
		ret = eth_igb_flex_filter_handle(dev, filter_op, arg);
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		break;
	}

	return ret;
}

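/*
 * Usage sketch (illustrative only, not part of the driver): before
 * programming a filter, an application can probe whether this PMD handles
 * the filter type at all.  The probe reaches this eth_igb_filter_ctrl()
 * entry point with RTE_ETH_FILTER_NOP and a NULL arg; port_id is a
 * hypothetical application value.
 *
 *	if (rte_eth_dev_filter_supported(port_id,
 *					 RTE_ETH_FILTER_NTUPLE) == 0)
 *		printf("port %u supports ntuple filters\n", port_id);
 */
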
static int
eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
			 struct ether_addr *mc_addr_set,
			 uint32_t nb_mc_addr)
{
	struct e1000_hw *hw;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
	return 0;
}

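/*
 * Usage sketch (illustrative only, not part of the driver): replace the
 * port's multicast whitelist in a single call.  The port id and the two
 * multicast addresses are hypothetical.
 *
 *	struct ether_addr mc_list[2] = {
 *		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
 *		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb } },
 *	};
 *	int ret = rte_eth_dev_set_mc_addr_list(port_id, mc_list, 2);
 */
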
static uint64_t
igb_read_systime_cyclecounter(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t systime_cycles;

	switch (hw->mac.type) {
	case e1000_i210:
	case e1000_i211:
		/*
		 * Need to read System Time Residue Register to be able
		 * to read the other two registers.
		 */
		E1000_READ_REG(hw, E1000_SYSTIMR);
		/* SYSTIMEL stores ns and SYSTIMEH stores seconds. */
		systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
		systime_cycles += (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH)
				* NSEC_PER_SEC;
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
		/*
		 * Need to read System Time Residue Register to be able
		 * to read the other two registers.
		 */
		E1000_READ_REG(hw, E1000_SYSTIMR);
		systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
		/* Only the 8 LSB are valid. */
		systime_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_SYSTIMH)
				& 0xff) << 32;
		break;
	default:
		systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
		systime_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH)
				<< 32;
		break;
	}

	return systime_cycles;
}

static uint64_t
igb_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t rx_tstamp_cycles;

	switch (hw->mac.type) {
	case e1000_i210:
	case e1000_i211:
		/* RXSTMPL stores ns and RXSTMPH stores seconds. */
		rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
		rx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH)
				* NSEC_PER_SEC;
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
		rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
		/* Only the 8 LSB are valid. */
		rx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw,
				E1000_RXSTMPH) & 0xff) << 32;
		break;
	default:
		rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
		rx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH)
				<< 32;
		break;
	}

	return rx_tstamp_cycles;
}

static uint64_t
igb_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t tx_tstamp_cycles;

	switch (hw->mac.type) {
	case e1000_i210:
	case e1000_i211:
		/* TXSTMPL stores ns and TXSTMPH stores seconds. */
		tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
		tx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH)
				* NSEC_PER_SEC;
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
		tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
		/* Only the 8 LSB are valid. */
		tx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw,
				E1000_TXSTMPH) & 0xff) << 32;
		break;
	default:
		tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
		tx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH)
				<< 32;
		break;
	}

	return tx_tstamp_cycles;
}

static void
igb_start_timecounters(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter =
		(struct e1000_adapter *)dev->data->dev_private;
	uint32_t incval = 1;
	uint32_t shift = 0;
	uint64_t mask = E1000_CYCLECOUNTER_MASK;

	switch (hw->mac.type) {
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
		/* 32 LSB bits + 8 MSB bits = 40 bits */
		mask = (1ULL << 40) - 1;
		/* fall-through */
	case e1000_i210:
	case e1000_i211:
		/*
		 * Start incrementing the register
		 * used to timestamp PTP packets.
		 */
		E1000_WRITE_REG(hw, E1000_TIMINCA, incval);
		break;
	case e1000_82576:
		incval = E1000_INCVALUE_82576;
		shift = IGB_82576_TSYNC_SHIFT;
		E1000_WRITE_REG(hw, E1000_TIMINCA,
				E1000_INCPERIOD_82576 | incval);
		break;
	default:
		/* Not supported. */
		return;
	}

	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));

	adapter->systime_tc.cc_mask = mask;
	adapter->systime_tc.cc_shift = shift;
	adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;

	adapter->rx_tstamp_tc.cc_mask = mask;
	adapter->rx_tstamp_tc.cc_shift = shift;
	adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;

	adapter->tx_tstamp_tc.cc_mask = mask;
	adapter->tx_tstamp_tc.cc_shift = shift;
	adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
}

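/*
 * Worked example of the 82576 settings above (derived from the constants,
 * included here for clarity; the 16 ns increment period is an assumption
 * from the 82576 datasheet): TIMINCA is programmed with an increment period
 * of one 16 ns tick and an increment value of 16 << 16, so SYSTIM advances
 * by 16 * 2^16 counter units every 16 ns, i.e. one SYSTIM unit represents
 * 2^-16 ns.  With cc_shift = IGB_82576_TSYNC_SHIFT = 16, the timecounter
 * converts cycles to nanoseconds as ns = cycles >> 16, matching that scale;
 * the other MAC types count whole nanoseconds, so they use shift = 0.
 */
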
static int
igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
{
	struct e1000_adapter *adapter =
			(struct e1000_adapter *)dev->data->dev_private;

	adapter->systime_tc.nsec += delta;
	adapter->rx_tstamp_tc.nsec += delta;
	adapter->tx_tstamp_tc.nsec += delta;

	return 0;
}

static int
igb_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
{
	uint64_t ns;
	struct e1000_adapter *adapter =
			(struct e1000_adapter *)dev->data->dev_private;

	ns = rte_timespec_to_ns(ts);

	/* Set the timecounters to a new value. */
	adapter->systime_tc.nsec = ns;
	adapter->rx_tstamp_tc.nsec = ns;
	adapter->tx_tstamp_tc.nsec = ns;

	return 0;
}

static int
igb_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
{
	uint64_t ns, systime_cycles;
	struct e1000_adapter *adapter =
			(struct e1000_adapter *)dev->data->dev_private;

	systime_cycles = igb_read_systime_cyclecounter(dev);
	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
	*ts = rte_ns_to_timespec(ns);

	return 0;
}

static int
igb_timesync_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t tsync_ctl;
	uint32_t tsauxc;

	/* Stop the timesync system time. */
	E1000_WRITE_REG(hw, E1000_TIMINCA, 0x0);
	/* Reset the timesync system time value. */
	switch (hw->mac.type) {
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		E1000_WRITE_REG(hw, E1000_SYSTIMR, 0x0);
		/* fall-through */
	case e1000_82576:
		E1000_WRITE_REG(hw, E1000_SYSTIML, 0x0);
		E1000_WRITE_REG(hw, E1000_SYSTIMH, 0x0);
		break;
	default:
		/* Not supported. */
		return -ENOTSUP;
	}

	/* Enable system time because it isn't on by default. */
	tsauxc = E1000_READ_REG(hw, E1000_TSAUXC);
	tsauxc &= ~E1000_TSAUXC_DISABLE_SYSTIME;
	E1000_WRITE_REG(hw, E1000_TSAUXC, tsauxc);

	igb_start_timecounters(dev);

	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
	E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588),
			(ETHER_TYPE_1588 |
			 E1000_ETQF_FILTER_ENABLE |
			 E1000_ETQF_1588));

	/* Enable timestamping of received PTP packets. */
	tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
	tsync_ctl |= E1000_TSYNCRXCTL_ENABLED;
	E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);

	/* Enable timestamping of transmitted PTP packets. */
	tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
	tsync_ctl |= E1000_TSYNCTXCTL_ENABLED;
	E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);

	return 0;
}

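/*
 * Usage sketch (illustrative only, not part of the driver): enable PTP
 * timestamping on a port and read the timestamp latched for a received PTP
 * frame.  port_id is a hypothetical application value; on igb the flags
 * argument of the RX read is unused.
 *
 *	struct timespec ts;
 *	rte_eth_timesync_enable(port_id);
 *	(receive a PTP frame with PKT_RX_IEEE1588_TMST set, then:)
 *	if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
 *		printf("rx tstamp: %ld.%09ld\n",
 *		       (long)ts.tv_sec, ts.tv_nsec);
 */
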
static int
igb_timesync_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t tsync_ctl;

	/* Disable timestamping of transmitted PTP packets. */
	tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
	tsync_ctl &= ~E1000_TSYNCTXCTL_ENABLED;
	E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);

	/* Disable timestamping of received PTP packets. */
	tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
	tsync_ctl &= ~E1000_TSYNCRXCTL_ENABLED;
	E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);

	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
	E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), 0);

	/* Stop incrementing the System Time registers. */
	E1000_WRITE_REG(hw, E1000_TIMINCA, 0);

	return 0;
}

static int
igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
			       struct timespec *timestamp,
			       uint32_t flags __rte_unused)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter =
			(struct e1000_adapter *)dev->data->dev_private;
	uint32_t tsync_rxctl;
	uint64_t rx_tstamp_cycles;
	uint64_t ns;

	tsync_rxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
	if ((tsync_rxctl & E1000_TSYNCRXCTL_VALID) == 0)
		return -EINVAL;

	rx_tstamp_cycles = igb_read_rx_tstamp_cyclecounter(dev);
	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
	*timestamp = rte_ns_to_timespec(ns);

	return 0;
}

static int
igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
			       struct timespec *timestamp)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter =
			(struct e1000_adapter *)dev->data->dev_private;
	uint32_t tsync_txctl;
	uint64_t tx_tstamp_cycles;
	uint64_t ns;

	tsync_txctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
	if ((tsync_txctl & E1000_TSYNCTXCTL_VALID) == 0)
		return -EINVAL;

	tx_tstamp_cycles = igb_read_tx_tstamp_cyclecounter(dev);
	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
	*timestamp = rte_ns_to_timespec(ns);

	return 0;
}

static int
eth_igb_get_reg_length(struct rte_eth_dev *dev __rte_unused)
{
	int count = 0;
	int g_ind = 0;
	const struct reg_info *reg_group;

	while ((reg_group = igb_regs[g_ind++]))
		count += igb_reg_group_count(reg_group);

	return count;
}

static int
igbvf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
{
	int count = 0;
	int g_ind = 0;
	const struct reg_info *reg_group;

	while ((reg_group = igbvf_regs[g_ind++]))
		count += igb_reg_group_count(reg_group);

	return count;
}

static int
eth_igb_get_regs(struct rte_eth_dev *dev,
	struct rte_dev_reg_info *regs)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t *data = regs->data;
	int g_ind = 0;
	int count = 0;
	const struct reg_info *reg_group;

	/* Support only full register dump */
	if ((regs->length == 0) ||
	    (regs->length == (uint32_t)eth_igb_get_reg_length(dev))) {
		regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
			hw->device_id;
		while ((reg_group = igb_regs[g_ind++]))
			count += igb_read_regs_group(dev, &data[count],
							reg_group);
		return 0;
	}

	return -ENOTSUP;
}

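/*
 * Usage sketch (illustrative only, not part of the driver): dump the full
 * register set through the generic ethdev API.  port_id is hypothetical;
 * a length of 0 requests the complete dump handled above.
 *
 *	struct rte_dev_reg_info info = { .length = 0 };
 *	int len = rte_eth_dev_get_reg_length(port_id);
 *	info.data = calloc(len, sizeof(uint32_t));
 *	if (info.data != NULL &&
 *	    rte_eth_dev_get_reg_info(port_id, &info) == 0)
 *		printf("dumped %d registers, version 0x%x\n",
 *		       len, info.version);
 */
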
static int
igbvf_get_regs(struct rte_eth_dev *dev,
	struct rte_dev_reg_info *regs)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t *data = regs->data;
	int g_ind = 0;
	int count = 0;
	const struct reg_info *reg_group;

	/* Support only full register dump */
	if ((regs->length == 0) ||
	    (regs->length == (uint32_t)igbvf_get_reg_length(dev))) {
		regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
			hw->device_id;
		while ((reg_group = igbvf_regs[g_ind++]))
			count += igb_read_regs_group(dev, &data[count],
							reg_group);
		return 0;
	}

	return -ENOTSUP;
}

static int
eth_igb_get_eeprom_length(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Return unit is byte count */
	return hw->nvm.word_size * 2;
}

static int
eth_igb_get_eeprom(struct rte_eth_dev *dev,
	struct rte_dev_eeprom_info *in_eeprom)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_nvm_info *nvm = &hw->nvm;
	uint16_t *data = in_eeprom->data;
	int first, length;

	first = in_eeprom->offset >> 1;
	length = in_eeprom->length >> 1;
	if ((first >= hw->nvm.word_size) ||
	    ((first + length) >= hw->nvm.word_size))
		return -EINVAL;

	in_eeprom->magic = hw->vendor_id |
		((uint32_t)hw->device_id << 16);

	if ((nvm->ops.read) == NULL)
		return -ENOTSUP;

	return nvm->ops.read(hw, first, length, data);
}

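/*
 * Usage sketch (illustrative only, not part of the driver): read the first
 * 16 bytes of the NVM.  Note that offset and length are byte counts and are
 * converted to 16-bit NVM words above (the >> 1 shifts).  port_id is
 * hypothetical.
 *
 *	uint16_t buf[8];
 *	struct rte_dev_eeprom_info info = {
 *		.data = buf,
 *		.offset = 0,
 *		.length = sizeof(buf),
 *	};
 *	int ret = rte_eth_dev_get_eeprom(port_id, &info);
 */
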
static int
eth_igb_set_eeprom(struct rte_eth_dev *dev,
	struct rte_dev_eeprom_info *in_eeprom)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_nvm_info *nvm = &hw->nvm;
	uint16_t *data = in_eeprom->data;
	int first, length;

	first = in_eeprom->offset >> 1;
	length = in_eeprom->length >> 1;
	if ((first >= hw->nvm.word_size) ||
	    ((first + length) >= hw->nvm.word_size))
		return -EINVAL;

	in_eeprom->magic = (uint32_t)hw->vendor_id |
		((uint32_t)hw->device_id << 16);

	if ((nvm->ops.write) == NULL)
		return -ENOTSUP;
	return nvm->ops.write(hw, first, length, data);
}

static struct rte_driver pmd_igb_drv = {
	.type = PMD_PDEV,
	.init = rte_igb_pmd_init,
};

static struct rte_driver pmd_igbvf_drv = {
	.type = PMD_PDEV,
	.init = rte_igbvf_pmd_init,
};

static int
eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t mask = 1 << queue_id;

	E1000_WRITE_REG(hw, E1000_EIMC, mask);
	E1000_WRITE_FLUSH(hw);

	return 0;
}

static int
eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t mask = 1 << queue_id;
	uint32_t regval;

	regval = E1000_READ_REG(hw, E1000_EIMS);
	E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
	E1000_WRITE_FLUSH(hw);

	rte_intr_enable(&dev->pci_dev->intr_handle);

	return 0;
}

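/*
 * Usage sketch (illustrative only, not part of the driver): a polling loop
 * can arm the per-queue interrupt and sleep until traffic arrives instead
 * of spinning.  port_id, queue and the epoll plumbing are hypothetical and
 * assume the queue was mapped with rte_eth_dev_rx_intr_ctl_q() beforehand.
 *
 *	while (rte_eth_rx_burst(port_id, queue, pkts, BURST) == 0) {
 *		rte_eth_dev_rx_intr_enable(port_id, queue);
 *		rte_epoll_wait(RTE_EPOLL_PER_THREAD, events, 1, -1);
 *		rte_eth_dev_rx_intr_disable(port_id, queue);
 *	}
 */
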
static void
eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
		   uint8_t index, uint8_t offset)
{
	uint32_t val = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);

	/* clear bits */
	val &= ~((uint32_t)0xFF << offset);

	/* write vector and valid bit */
	val |= (msix_vector | E1000_IVAR_VALID) << offset;

	E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, val);
}

static void
eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
			   uint8_t queue, uint8_t msix_vector)
{
	uint32_t tmp = 0;

	if (hw->mac.type == e1000_82575) {
		if (direction == 0)
			tmp = E1000_EICR_RX_QUEUE0 << queue;
		else if (direction == 1)
			tmp = E1000_EICR_TX_QUEUE0 << queue;
		E1000_WRITE_REG(hw, E1000_MSIXBM(msix_vector), tmp);
	} else if (hw->mac.type == e1000_82576) {
		if ((direction == 0) || (direction == 1))
			eth_igb_write_ivar(hw, msix_vector, queue & 0x7,
					   ((queue & 0x8) << 1) +
					   8 * direction);
	} else if ((hw->mac.type == e1000_82580) ||
			(hw->mac.type == e1000_i350) ||
			(hw->mac.type == e1000_i354) ||
			(hw->mac.type == e1000_i210) ||
			(hw->mac.type == e1000_i211)) {
		if ((direction == 0) || (direction == 1))
			eth_igb_write_ivar(hw, msix_vector,
					   queue >> 1,
					   ((queue & 0x1) << 4) +
					   8 * direction);
	}
}

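/*
 * Worked example of the 82580/i350-style mapping above (added for clarity,
 * derived from the arithmetic in the code): each 32-bit IVAR entry holds
 * four 8-bit vector fields, one per (queue pair, rx/tx) slot.  For RX
 * queue 3 (direction 0): index = 3 >> 1 = 1 and
 * offset = ((3 & 0x1) << 4) + 8 * 0 = 16, so the vector is written into
 * bits 23:16 of IVAR[1], with E1000_IVAR_VALID marking the field in use.
 */
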
/* Sets up the hardware to generate MSI-X interrupts properly.
 * @dev
 *  pointer to rte_eth_dev structure
 */
static void
eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
{
	int queue_id;
	uint32_t tmpval, regval, intr_mask;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t vec = E1000_MISC_VEC_ID;
	uint32_t base = E1000_MISC_VEC_ID;
	uint32_t misc_shift = 0;

	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;

	/* won't configure msix register if no mapping is done
	 * between intr vector and event fd
	 */
	if (!rte_intr_dp_is_en(intr_handle))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		vec = base = E1000_RX_VEC_START;
		misc_shift = 1;
	}

	/* set interrupt vector for other causes */
	if (hw->mac.type == e1000_82575) {
		tmpval = E1000_READ_REG(hw, E1000_CTRL_EXT);
		/* enable MSI-X PBA support */
		tmpval |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read */
		tmpval |= E1000_CTRL_EXT_EIAME;
		tmpval |= E1000_CTRL_EXT_IRCA;

		E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmpval);

		/* enable msix_other interrupt */
		E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), 0,
				      E1000_EIMS_OTHER);
		regval = E1000_READ_REG(hw, E1000_EIAC);
		E1000_WRITE_REG(hw, E1000_EIAC, regval | E1000_EIMS_OTHER);
		regval = E1000_READ_REG(hw, E1000_EIAM);
		E1000_WRITE_REG(hw, E1000_EIMS, regval | E1000_EIMS_OTHER);
	} else if ((hw->mac.type == e1000_82576) ||
			(hw->mac.type == e1000_82580) ||
			(hw->mac.type == e1000_i350) ||
			(hw->mac.type == e1000_i354) ||
			(hw->mac.type == e1000_i210) ||
			(hw->mac.type == e1000_i211)) {
		/* turn on MSI-X capability first */
		E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE |
					E1000_GPIE_PBA | E1000_GPIE_EIAME |
					E1000_GPIE_NSICR);

		intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
			misc_shift;
		regval = E1000_READ_REG(hw, E1000_EIAC);
		E1000_WRITE_REG(hw, E1000_EIAC, regval | intr_mask);

		/* enable msix_other interrupt */
		regval = E1000_READ_REG(hw, E1000_EIMS);
		E1000_WRITE_REG(hw, E1000_EIMS, regval | intr_mask);
		tmpval = (dev->data->nb_rx_queues | E1000_IVAR_VALID) << 8;
		E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmpval);
	}

	/* use EIAM to auto-mask when MSI-X interrupt
	 * is asserted, this saves a register write for every interrupt
	 */
	intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
		misc_shift;
	regval = E1000_READ_REG(hw, E1000_EIAM);
	E1000_WRITE_REG(hw, E1000_EIAM, regval | intr_mask);

	for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
		eth_igb_assign_msix_vector(hw, 0, queue_id, vec);
		intr_handle->intr_vec[queue_id] = vec;
		if (vec < base + intr_handle->nb_efd - 1)
			vec++;
	}

	E1000_WRITE_FLUSH(hw);
}

PMD_REGISTER_DRIVER(pmd_igb_drv);
PMD_REGISTER_DRIVER(pmd_igbvf_drv);