/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>
#include <limits.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_dev.h>

#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"
/*
 * Default values for port configuration
 */
#define IGB_DEFAULT_RX_FREE_THRESH  32
#define IGB_DEFAULT_RX_PTHRESH      8
#define IGB_DEFAULT_RX_HTHRESH      8
#define IGB_DEFAULT_RX_WTHRESH      0

#define IGB_DEFAULT_TX_PTHRESH      32
#define IGB_DEFAULT_TX_HTHRESH      0
#define IGB_DEFAULT_TX_WTHRESH      0

#define IGB_HKEY_MAX_INDEX          10

/* Bit shift and mask */
#define IGB_4_BIT_WIDTH  (CHAR_BIT / 2)
#define IGB_4_BIT_MASK   RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
#define IGB_8_BIT_WIDTH  CHAR_BIT
#define IGB_8_BIT_MASK   UINT8_MAX

/* Additional timesync values. */
#define E1000_ETQF_FILTER_1588  3
#define E1000_TIMINCA_INCVALUE  16000000
#define E1000_TIMINCA_INIT      ((0x02 << E1000_TIMINCA_16NS_SHIFT) \
				 | E1000_TIMINCA_INCVALUE)
static int  eth_igb_configure(struct rte_eth_dev *dev);
static int  eth_igb_start(struct rte_eth_dev *dev);
static void eth_igb_stop(struct rte_eth_dev *dev);
static void eth_igb_close(struct rte_eth_dev *dev);
static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
static void eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
static int  eth_igb_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static void eth_igb_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *rte_stats);
static void eth_igb_stats_reset(struct rte_eth_dev *dev);
static void eth_igb_infos_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
static void eth_igbvf_infos_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);
static int  eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int  eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
static int eth_igb_interrupt_action(struct rte_eth_dev *dev);
static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
				      void *param);
static int  igb_hardware_init(struct e1000_hw *hw);
static void igb_hw_control_acquire(struct e1000_hw *hw);
static void igb_hw_control_release(struct e1000_hw *hw);
static void igb_init_manageability(struct e1000_hw *hw);
static void igb_release_manageability(struct e1000_hw *hw);
static int eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static void eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
static void eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int eth_igb_led_on(struct rte_eth_dev *dev);
static int eth_igb_led_off(struct rte_eth_dev *dev);

static void igb_intr_disable(struct e1000_hw *hw);
static int  igb_get_rx_buffer_size(struct e1000_hw *hw);
static void eth_igb_rar_set(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr,
		uint32_t index, uint32_t pool);
static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
static void eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
		struct ether_addr *addr);
static void igbvf_intr_disable(struct e1000_hw *hw);
static int igbvf_dev_configure(struct rte_eth_dev *dev);
static int igbvf_dev_start(struct rte_eth_dev *dev);
static void igbvf_dev_stop(struct rte_eth_dev *dev);
static void igbvf_dev_close(struct rte_eth_dev *dev);
static int eth_igbvf_link_update(struct e1000_hw *hw);
static void eth_igbvf_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *rte_stats);
static void eth_igbvf_stats_reset(struct rte_eth_dev *dev);
static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static void igbvf_default_mac_addr_set(struct rte_eth_dev *dev,
		struct ether_addr *addr);
static int eth_igb_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
			struct rte_eth_syn_filter *filter,
			bool add);
static int eth_igb_syn_filter_get(struct rte_eth_dev *dev,
			struct rte_eth_syn_filter *filter);
static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int igb_add_2tuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_2tuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
			struct rte_eth_flex_filter *filter,
			bool add);
static int eth_igb_get_flex_filter(struct rte_eth_dev *dev,
			struct rte_eth_flex_filter *filter);
static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *filter,
			bool add);
static int igb_get_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *filter);
static int igb_ntuple_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter,
			bool add);
static int igb_ethertype_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int igb_get_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter);
static int eth_igb_filter_ctrl(struct rte_eth_dev *dev,
			enum rte_filter_type filter_type,
			enum rte_filter_op filter_op,
			void *arg);
static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
				    struct ether_addr *mc_addr_set,
				    uint32_t nb_mc_addr);
static int igb_timesync_enable(struct rte_eth_dev *dev);
static int igb_timesync_disable(struct rte_eth_dev *dev);
static int igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
					  struct timespec *timestamp,
					  uint32_t flags);
static int igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					  struct timespec *timestamp);
/*
 * Define VF Stats MACRO for Non "cleared on read" register
 */
#define UPDATE_VF_STAT(reg, last, cur)            \
{                                                 \
	u32 latest = E1000_READ_REG(hw, reg);     \
	cur += latest - last;                     \
	last = latest;                            \
}
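/*
 * Usage sketch (illustrative): the macro expects a local "hw" pointer in
 * scope and accumulates the delta since the previous read, e.g. as done
 * in eth_igbvf_stats_get() below:
 *
 *	UPDATE_VF_STAT(E1000_VFGPRC,
 *	    hw_stats->last_gprc, hw_stats->gprc);
 */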
#define IGB_FC_PAUSE_TIME 0x0680
#define IGB_LINK_UPDATE_CHECK_TIMEOUT  90  /* 9s */
#define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */

#define IGBVF_PMD_NAME "rte_igbvf_pmd"	/* PMD name */

static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;
/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_igb_map[] = {

#define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

	{0},
};

/*
 * The set of PCI devices this driver supports (for 82576&I350 VF)
 */
static const struct rte_pci_id pci_id_igbvf_map[] = {

#define RTE_PCI_DEV_ID_DECL_IGBVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

	{0},
};
static const struct eth_dev_ops eth_igb_ops = {
	.dev_configure = eth_igb_configure,
	.dev_start = eth_igb_start,
	.dev_stop = eth_igb_stop,
	.dev_close = eth_igb_close,
	.promiscuous_enable = eth_igb_promiscuous_enable,
	.promiscuous_disable = eth_igb_promiscuous_disable,
	.allmulticast_enable = eth_igb_allmulticast_enable,
	.allmulticast_disable = eth_igb_allmulticast_disable,
	.link_update = eth_igb_link_update,
	.stats_get = eth_igb_stats_get,
	.stats_reset = eth_igb_stats_reset,
	.dev_infos_get = eth_igb_infos_get,
	.mtu_set = eth_igb_mtu_set,
	.vlan_filter_set = eth_igb_vlan_filter_set,
	.vlan_tpid_set = eth_igb_vlan_tpid_set,
	.vlan_offload_set = eth_igb_vlan_offload_set,
	.rx_queue_setup = eth_igb_rx_queue_setup,
	.rx_queue_release = eth_igb_rx_queue_release,
	.rx_queue_count = eth_igb_rx_queue_count,
	.rx_descriptor_done = eth_igb_rx_descriptor_done,
	.tx_queue_setup = eth_igb_tx_queue_setup,
	.tx_queue_release = eth_igb_tx_queue_release,
	.dev_led_on = eth_igb_led_on,
	.dev_led_off = eth_igb_led_off,
	.flow_ctrl_get = eth_igb_flow_ctrl_get,
	.flow_ctrl_set = eth_igb_flow_ctrl_set,
	.mac_addr_add = eth_igb_rar_set,
	.mac_addr_remove = eth_igb_rar_clear,
	.mac_addr_set = eth_igb_default_mac_addr_set,
	.reta_update = eth_igb_rss_reta_update,
	.reta_query = eth_igb_rss_reta_query,
	.rss_hash_update = eth_igb_rss_hash_update,
	.rss_hash_conf_get = eth_igb_rss_hash_conf_get,
	.filter_ctrl = eth_igb_filter_ctrl,
	.set_mc_addr_list = eth_igb_set_mc_addr_list,
	.timesync_enable = igb_timesync_enable,
	.timesync_disable = igb_timesync_disable,
	.timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = igb_timesync_read_tx_timestamp,
};
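/*
 * Illustrative note: applications never call these handlers directly; they
 * reach them through the generic ethdev API. A minimal PF bring-up sketch
 * (assuming port_id 0 and a previously created mempool "mb_pool"):
 *
 *	struct rte_eth_conf conf = { .rxmode = { .mq_mode = ETH_MQ_RX_NONE } };
 *	rte_eth_dev_configure(0, 1, 1, &conf);	// -> eth_igb_configure()
 *	rte_eth_rx_queue_setup(0, 0, 128, 0, NULL, mb_pool);
 *	rte_eth_tx_queue_setup(0, 0, 512, 0, NULL);
 *	rte_eth_dev_start(0);			// -> eth_igb_start()
 */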
/*
 * dev_ops for virtual function, bare necessities for basic vf
 * operation have been implemented
 */
static const struct eth_dev_ops igbvf_eth_dev_ops = {
	.dev_configure = igbvf_dev_configure,
	.dev_start = igbvf_dev_start,
	.dev_stop = igbvf_dev_stop,
	.dev_close = igbvf_dev_close,
	.link_update = eth_igb_link_update,
	.stats_get = eth_igbvf_stats_get,
	.stats_reset = eth_igbvf_stats_reset,
	.vlan_filter_set = igbvf_vlan_filter_set,
	.dev_infos_get = eth_igbvf_infos_get,
	.rx_queue_setup = eth_igb_rx_queue_setup,
	.rx_queue_release = eth_igb_rx_queue_release,
	.tx_queue_setup = eth_igb_tx_queue_setup,
	.tx_queue_release = eth_igb_tx_queue_release,
	.set_mc_addr_list = eth_igb_set_mc_addr_list,
	.mac_addr_set = igbvf_default_mac_addr_set,
};
/**
 * Atomically reads the link status information from global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to read from.
 *   - Pointer to the buffer to be saved with the link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_igb_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
/**
 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to write to.
 *   - Pointer to the buffer holding the link status to be written.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
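/*
 * Note on the two helpers above: rte_atomic64_cmpset() operates on a single
 * 64-bit word, so this scheme relies on struct rte_eth_link fitting in
 * 64 bits. The compare value is the destination re-read just before the
 * cmpset, so a concurrent writer makes the cmpset fail (it returns 0),
 * which is reported to the caller as -1.
 */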
static void
igb_intr_enable(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
	E1000_WRITE_FLUSH(hw);
}

static void
igb_intr_disable(struct e1000_hw *hw)
{
	E1000_WRITE_REG(hw, E1000_IMC, ~0);
	E1000_WRITE_FLUSH(hw);
}
static inline int32_t
igb_pf_reset_hw(struct e1000_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = e1000_reset_hw(hw);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	return status;
}
static void
igb_identify_hardware(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw->vendor_id = dev->pci_dev->id.vendor_id;
	hw->device_id = dev->pci_dev->id.device_id;
	hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;

	e1000_set_mac_type(hw);

	/* need to check if it is a vf device below */
}
static int
igb_reset_swfw_lock(struct e1000_hw *hw)
{
	int ret_val;

	/*
	 * Do mac ops initialization manually here, since we will need
	 * some function pointers set by this call.
	 */
	ret_val = e1000_init_mac_params(hw);
	if (ret_val)
		return ret_val;

	/*
	 * SMBI lock should not fail in this early stage. If this is the case,
	 * it is due to an improper exit of the application.
	 * So force the release of the faulty lock.
	 */
	if (e1000_get_hw_semaphore_generic(hw) < 0) {
		PMD_DRV_LOG(DEBUG, "SMBI lock released");
	}
	e1000_put_hw_semaphore_generic(hw);

	if (hw->mac.ops.acquire_swfw_sync != NULL) {
		uint16_t mask;

		/*
		 * Phy lock should not fail in this early stage. If this is
		 * the case, it is due to an improper exit of the application.
		 * So force the release of the faulty lock.
		 */
		mask = E1000_SWFW_PHY0_SM << hw->bus.func;
		if (hw->bus.func > E1000_FUNC_1)
			mask <<= 2;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
				    hw->bus.func);
		}
		hw->mac.ops.release_swfw_sync(hw, mask);

		/*
		 * This one is more tricky since it is common to all ports; but
		 * swfw_sync retries last long enough (1s) to be almost sure
		 * that if lock can not be taken it is due to an improper lock
		 * of the semaphore.
		 */
		mask = E1000_SWFW_EEP_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW common locks released");
		}
		hw->mac.ops.release_swfw_sync(hw, mask);
	}

	return E1000_SUCCESS;
}
static int
eth_igb_dev_init(struct rte_eth_dev *eth_dev)
{
	int error = 0;
	struct rte_pci_device *pci_dev;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	uint32_t ctrl_ext;

	pci_dev = eth_dev->pci_dev;
	eth_dev->dev_ops = &eth_igb_ops;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
		return 0;
	}

	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	igb_identify_hardware(eth_dev);
	if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	e1000_get_bus_info(hw);

	/* Reset any pending lock */
	if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	/* Finish initialization */
	if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	hw->mac.autoneg = 1;
	hw->phy.autoneg_wait_to_complete = 0;
	hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = 0; /* AUTO_ALL_MODES */
		hw->phy.disable_polarity_correction = 0;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	/*
	 * Start from a known state, this is important in reading the nvm
	 * and mac from that.
	 */
	igb_pf_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time its a real issue.
		 */
		if (e1000_validate_nvm_checksum(hw) < 0) {
			PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
			error = -EIO;
			goto err_late;
		}
	}

	/* Read the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(hw) != 0) {
		PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
		error = -EIO;
		goto err_late;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("e1000",
		ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
			     "store MAC addresses",
			     ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		error = -ENOMEM;
		goto err_late;
	}

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)hw->mac.addr,
			&eth_dev->data->mac_addrs[0]);

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* Now initialize the hardware */
	if (igb_hardware_init(hw) != 0) {
		PMD_INIT_LOG(ERR, "Hardware initialization failed");
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		error = -ENODEV;
		goto err_late;
	}
	hw->mac.get_link_status = 1;

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(hw) < 0) {
		PMD_INIT_LOG(ERR, "PHY reset is blocked due to "
			     "SOL/IDER session");
	}

	/* initialize PF if max_vfs not zero */
	igb_pf_host_init(eth_dev);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(&(pci_dev->intr_handle),
		eth_igb_interrupt_handler, (void *)eth_dev);

	/* enable uio intr after callback register */
	rte_intr_enable(&(pci_dev->intr_handle));

	/* enable support intr */
	igb_intr_enable(eth_dev);

	TAILQ_INIT(&filter_info->flex_list);
	filter_info->flex_mask = 0;
	TAILQ_INIT(&filter_info->twotuple_list);
	filter_info->twotuple_mask = 0;
	TAILQ_INIT(&filter_info->fivetuple_list);
	filter_info->fivetuple_mask = 0;

	return 0;

err_late:
	igb_hw_control_release(hw);

	return error;
}
/*
 * Virtual Function device init
 */
static int
eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int diag;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &igbvf_eth_dev_ops;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
		return 0;
	}

	pci_dev = eth_dev->pci_dev;

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	/* Initialize the shared code (base driver) */
	diag = e1000_setup_init_funcs(hw, TRUE);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d",
			     diag);
		return -EIO;
	}

	/* init_mailbox_params */
	hw->mbx.ops.init_params(hw);

	/* Disable the interrupts for VF */
	igbvf_intr_disable(hw);

	diag = hw->mac.ops.reset_hw(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN *
		hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %d bytes needed to store MAC "
			"addresses",
			ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x "
		     "mac.type=%s",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id, "igb_mac_82576_vf");

	return 0;
}
static struct eth_driver rte_igb_pmd = {
	.pci_drv = {
		.name = "rte_igb_pmd",
		.id_table = pci_id_igb_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	},
	.eth_dev_init = eth_igb_dev_init,
	.dev_private_size = sizeof(struct e1000_adapter),
};

/*
 * virtual function driver struct
 */
static struct eth_driver rte_igbvf_pmd = {
	.pci_drv = {
		.name = "rte_igbvf_pmd",
		.id_table = pci_id_igbvf_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	},
	.eth_dev_init = eth_igbvf_dev_init,
	.dev_private_size = sizeof(struct e1000_adapter),
};
static int
rte_igb_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
{
	rte_eth_driver_register(&rte_igb_pmd);
	return 0;
}
static void
igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* RCTL: enable VLAN filter since VMDq always use VLAN filter */
	uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}
/*
 * VF Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Virtual Poll Mode] Driver of PCI IGB devices.
 */
static int
rte_igbvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	rte_eth_driver_register(&rte_igbvf_pmd);
	return 0;
}
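/*
 * Hookup sketch: elsewhere in this file (outside this excerpt) the two init
 * routines above are typically registered with the EAL using the DPDK 2.x
 * idiom, roughly:
 *
 *	static struct rte_driver pmd_igb_drv = {
 *		.type = PMD_PDEV,
 *		.init = rte_igb_pmd_init,
 *	};
 *	PMD_REGISTER_DRIVER(pmd_igb_drv);
 *
 * This is an illustrative sketch of the registration pattern, not a
 * verbatim copy of the elided code.
 */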
static int
eth_igb_configure(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();
	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
	PMD_INIT_FUNC_TRACE();

	return 0;
}
static int
eth_igb_start(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret, i, mask;
	uint32_t ctrl_ext;

	PMD_INIT_FUNC_TRACE();

	/* Power up the phy. Needed to make the link go Up */
	e1000_power_up_phy(hw);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 */
	if (hw->mac.type == e1000_82575) {
		uint32_t pba;

		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		E1000_WRITE_REG(hw, E1000_PBA, pba);
	}

	/* Put the address into the Receive Address Array */
	e1000_rar_set(hw, hw->mac.addr, 0);

	/* Initialize the hardware */
	if (igb_hardware_init(hw)) {
		PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
		return -EIO;
	}

	E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	/* configure PF module if SRIOV enabled */
	igb_pf_host_configure(dev);

	/* Configure for OS presence */
	igb_init_manageability(hw);

	eth_igb_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	ret = eth_igb_rx_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		igb_dev_clear_queues(dev);
		return ret;
	}

	e1000_clear_hw_cntrs_base_generic(hw);

	/*
	 * VLAN Offload Settings
	 */
	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
			ETH_VLAN_EXTEND_MASK;
	eth_igb_vlan_offload_set(dev, mask);

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
		/* Enable VLAN filter since VMDq always use VLAN filter */
		igb_vmdq_vlan_hw_filter_enable(dev);
	}

	/*
	 * Configure the Interrupt Moderation register (EITR) with the maximum
	 * possible value (0xFFFF) to minimize "System Partial Write" issued by
	 * spurious [DMA] memory updates of RX and TX ring descriptors.
	 *
	 * With a EITR granularity of 2 microseconds in the 82576, only 7/8
	 * spurious memory updates per second should be expected.
	 * ((65535 * 2) / (1000 * 1000) ~= 0.131 second).
	 *
	 * Because interrupts are not used at all, the MSI-X is not activated
	 * and interrupt moderation is controlled by EITR[0].
	 *
	 * Note that having [almost] disabled memory updates of RX and TX ring
	 * descriptors through the Interrupt Moderation mechanism, memory
	 * updates of ring descriptors are now moderated by the configurable
	 * value of Write-Back Threshold registers.
	 */
	if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
	    (hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210) ||
	    (hw->mac.type == e1000_i211)) {
		uint32_t ivar;

		/* Enable all RX & TX queues in the IVAR registers */
		ivar = (uint32_t) ((E1000_IVAR_VALID << 16) | E1000_IVAR_VALID);
		for (i = 0; i < 8; i++)
			E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, i, ivar);

		/* Configure EITR with the maximum possible value (0xFFFF) */
		E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
	}

	/* Setup link speed and duplex */
	switch (dev->data->dev_conf.link_speed) {
	case ETH_LINK_SPEED_AUTONEG:
		if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
			hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
		else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
			hw->phy.autoneg_advertised = E1000_ALL_HALF_DUPLEX;
		else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
			hw->phy.autoneg_advertised = E1000_ALL_FULL_DUPLEX;
		else
			goto error_invalid_config;
		break;
	case ETH_LINK_SPEED_10:
		if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
			hw->phy.autoneg_advertised = E1000_ALL_10_SPEED;
		else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
			hw->phy.autoneg_advertised = ADVERTISE_10_HALF;
		else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
			hw->phy.autoneg_advertised = ADVERTISE_10_FULL;
		else
			goto error_invalid_config;
		break;
	case ETH_LINK_SPEED_100:
		if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
			hw->phy.autoneg_advertised = E1000_ALL_100_SPEED;
		else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
			hw->phy.autoneg_advertised = ADVERTISE_100_HALF;
		else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
			hw->phy.autoneg_advertised = ADVERTISE_100_FULL;
		else
			goto error_invalid_config;
		break;
	case ETH_LINK_SPEED_1000:
		if ((dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX) ||
		    (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX))
			hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
		else
			goto error_invalid_config;
		break;
	case ETH_LINK_SPEED_10000:
	default:
		goto error_invalid_config;
	}
	e1000_setup_link(hw);

	/* check if lsc interrupt feature is enabled */
	if (dev->data->dev_conf.intr_conf.lsc != 0)
		ret = eth_igb_lsc_interrupt_setup(dev);

	/* resume enabled intr since hw reset */
	igb_intr_enable(dev);

	PMD_INIT_LOG(DEBUG, "<<");

	return 0;

error_invalid_config:
	PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u",
		     dev->data->dev_conf.link_speed,
		     dev->data->dev_conf.link_duplex, dev->data->port_id);
	igb_dev_clear_queues(dev);
	return -EINVAL;
}
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 *
 **********************************************************************/
static void
eth_igb_stop(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct rte_eth_link link;
	struct e1000_flex_filter *p_flex;
	struct e1000_5tuple_filter *p_5tuple, *p_5tuple_next;
	struct e1000_2tuple_filter *p_2tuple, *p_2tuple_next;

	igb_intr_disable(hw);
	igb_pf_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	/* Set bit for Go Link disconnect */
	if (hw->mac.type >= e1000_82580) {
		uint32_t phpm_reg;

		phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
		phpm_reg |= E1000_82580_PM_GO_LINKD;
		E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
	}

	/* Power down the phy. Needed to make the link go Down */
	if (hw->phy.media_type == e1000_media_type_copper)
		e1000_power_down_phy(hw);
	else
		e1000_shutdown_fiber_serdes_link(hw);

	igb_dev_clear_queues(dev);

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_igb_dev_atomic_write_link_status(dev, &link);

	/* Remove all flex filters of the device */
	while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) {
		TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries);
		rte_free(p_flex);
	}
	filter_info->flex_mask = 0;

	/* Remove all ntuple filters of the device */
	for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
	     p_5tuple != NULL; p_5tuple = p_5tuple_next) {
		p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
		TAILQ_REMOVE(&filter_info->fivetuple_list,
			     p_5tuple, entries);
		rte_free(p_5tuple);
	}
	filter_info->fivetuple_mask = 0;
	for (p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list);
	     p_2tuple != NULL; p_2tuple = p_2tuple_next) {
		p_2tuple_next = TAILQ_NEXT(p_2tuple, entries);
		TAILQ_REMOVE(&filter_info->twotuple_list,
			     p_2tuple, entries);
		rte_free(p_2tuple);
	}
	filter_info->twotuple_mask = 0;
}
static void
eth_igb_close(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link;

	eth_igb_stop(dev);
	e1000_phy_hw_reset(hw);
	igb_release_manageability(hw);
	igb_hw_control_release(hw);

	/* Clear bit for Go Link disconnect */
	if (hw->mac.type >= e1000_82580) {
		uint32_t phpm_reg;

		phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
		phpm_reg &= ~E1000_82580_PM_GO_LINKD;
		E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
	}

	igb_dev_clear_queues(dev);

	memset(&link, 0, sizeof(link));
	rte_igb_dev_atomic_write_link_status(dev, &link);
}
static int
igb_get_rx_buffer_size(struct e1000_hw *hw)
{
	uint32_t rx_buf_size;

	if (hw->mac.type == e1000_82576) {
		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10;
	} else if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i350) {
		/* PBS needs to be translated according to a lookup table */
		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf);
		rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size);
		rx_buf_size = (rx_buf_size << 10);
	} else if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10;
	} else {
		rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10;
	}

	return rx_buf_size;
}
/*********************************************************************
 *
 *  Initialize the hardware
 *
 **********************************************************************/
static int
igb_hardware_init(struct e1000_hw *hw)
{
	uint32_t rx_buf_size;
	int diag;

	/* Let the firmware know the OS is in control */
	igb_hw_control_acquire(hw);

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two standard size (1518)
	 *   frames to be received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	rx_buf_size = igb_get_rx_buffer_size(hw);

	hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2);
	hw->fc.low_water = hw->fc.high_water - 1500;
	hw->fc.pause_time = IGB_FC_PAUSE_TIME;
	hw->fc.send_xon = 1;
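	/*
	 * Worked example (illustrative): on an 82576 reporting a 64 KB Rx
	 * packet buffer, rx_buf_size = 64 << 10 = 65536 bytes, so
	 * high_water = 65536 - 2 * 1518 = 62500 bytes and
	 * low_water  = 62500 - 1500    = 61000 bytes.
	 */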
	/* Set Flow control, use the tunable location if sane */
	if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4))
		hw->fc.requested_mode = igb_fc_setting;
	else
		hw->fc.requested_mode = e1000_fc_none;

	/* Issue a global reset */
	igb_pf_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	diag = e1000_init_hw(hw);
	if (diag < 0)
		return diag;

	E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
	e1000_get_phy_info(hw);
	e1000_check_for_link(hw);

	return 0;
}
/* This function is based on igb_update_stats_counters() in igb/if_igb.c */
static void
eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_hw_stats *stats =
		E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	int pause_frames;

	if (hw->phy.media_type == e1000_media_type_copper ||
	    (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		stats->symerrs +=
		    E1000_READ_REG(hw, E1000_SYMERRS);
		stats->sec += E1000_READ_REG(hw, E1000_SEC);
	}

	stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
	stats->mpc += E1000_READ_REG(hw, E1000_MPC);
	stats->scc += E1000_READ_REG(hw, E1000_SCC);
	stats->ecol += E1000_READ_REG(hw, E1000_ECOL);

	stats->mcc += E1000_READ_REG(hw, E1000_MCC);
	stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
	stats->colc += E1000_READ_REG(hw, E1000_COLC);
	stats->dc += E1000_READ_REG(hw, E1000_DC);
	stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
	stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
	stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
	/*
	 * For watchdog management we need to know if we have been
	 * paused during the last interval, so capture that here.
	 */
	pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
	stats->xoffrxc += pause_frames;
	stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
	stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
	stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
	stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
	stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
	stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
	stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
	stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
	stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
	stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
	stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
	stats->gptc += E1000_READ_REG(hw, E1000_GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
	stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
	stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
	stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);

	stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
	stats->ruc += E1000_READ_REG(hw, E1000_RUC);
	stats->rfc += E1000_READ_REG(hw, E1000_RFC);
	stats->roc += E1000_READ_REG(hw, E1000_ROC);
	stats->rjc += E1000_READ_REG(hw, E1000_RJC);

	stats->tor += E1000_READ_REG(hw, E1000_TORH);
	stats->tot += E1000_READ_REG(hw, E1000_TOTH);

	stats->tpr += E1000_READ_REG(hw, E1000_TPR);
	stats->tpt += E1000_READ_REG(hw, E1000_TPT);
	stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
	stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
	stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
	stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
	stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
	stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
	stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
	stats->bptc += E1000_READ_REG(hw, E1000_BPTC);

	/* Interrupt Counts */

	stats->iac += E1000_READ_REG(hw, E1000_IAC);
	stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
	stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
	stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
	stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
	stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
	stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
	stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
	stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);

	/* Host to Card Statistics */

	stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
	stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
	stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
	stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
	stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
	stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
	stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
	stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL);
	stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32);
	stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL);
	stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32);
	stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
	stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
	stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);

	stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
	stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
	stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
	stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
	stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
	stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);

	if (rte_stats == NULL)
		return;

	/* Rx Errors */
	rte_stats->ibadcrc = stats->crcerrs;
	rte_stats->ibadlen = stats->rlec + stats->ruc + stats->roc;
	rte_stats->imissed = stats->mpc;
	rte_stats->ierrors = rte_stats->ibadcrc +
			     rte_stats->ibadlen +
			     rte_stats->imissed +
			     stats->rxerrc + stats->algnerrc + stats->cexterr;

	/* Tx Errors */
	rte_stats->oerrors = stats->ecol + stats->latecol;

	/* XON/XOFF pause frames */
	rte_stats->tx_pause_xon = stats->xontxc;
	rte_stats->rx_pause_xon = stats->xonrxc;
	rte_stats->tx_pause_xoff = stats->xofftxc;
	rte_stats->rx_pause_xoff = stats->xoffrxc;

	rte_stats->ipackets = stats->gprc;
	rte_stats->opackets = stats->gptc;
	rte_stats->ibytes = stats->gorc;
	rte_stats->obytes = stats->gotc;
}
static void
eth_igb_stats_reset(struct rte_eth_dev *dev)
{
	struct e1000_hw_stats *hw_stats =
		E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* HW registers are cleared on read */
	eth_igb_stats_get(dev, NULL);

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));
}
static void
eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
		E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* Good Rx packets, include VF loopback */
	UPDATE_VF_STAT(E1000_VFGPRC,
	    hw_stats->last_gprc, hw_stats->gprc);

	/* Good Rx octets, include VF loopback */
	UPDATE_VF_STAT(E1000_VFGORC,
	    hw_stats->last_gorc, hw_stats->gorc);

	/* Good Tx packets, include VF loopback */
	UPDATE_VF_STAT(E1000_VFGPTC,
	    hw_stats->last_gptc, hw_stats->gptc);

	/* Good Tx octets, include VF loopback */
	UPDATE_VF_STAT(E1000_VFGOTC,
	    hw_stats->last_gotc, hw_stats->gotc);

	/* Rx Multicast packets */
	UPDATE_VF_STAT(E1000_VFMPRC,
	    hw_stats->last_mprc, hw_stats->mprc);

	/* Good Rx loopback packets */
	UPDATE_VF_STAT(E1000_VFGPRLBC,
	    hw_stats->last_gprlbc, hw_stats->gprlbc);

	/* Good Rx loopback octets */
	UPDATE_VF_STAT(E1000_VFGORLBC,
	    hw_stats->last_gorlbc, hw_stats->gorlbc);

	/* Good Tx loopback packets */
	UPDATE_VF_STAT(E1000_VFGPTLBC,
	    hw_stats->last_gptlbc, hw_stats->gptlbc);

	/* Good Tx loopback octets */
	UPDATE_VF_STAT(E1000_VFGOTLBC,
	    hw_stats->last_gotlbc, hw_stats->gotlbc);

	if (rte_stats == NULL)
		return;

	rte_stats->ipackets = hw_stats->gprc;
	rte_stats->ibytes = hw_stats->gorc;
	rte_stats->opackets = hw_stats->gptc;
	rte_stats->obytes = hw_stats->gotc;
	rte_stats->imcasts = hw_stats->mprc;
	rte_stats->ilbpackets = hw_stats->gprlbc;
	rte_stats->ilbbytes = hw_stats->gorlbc;
	rte_stats->olbpackets = hw_stats->gptlbc;
	rte_stats->olbbytes = hw_stats->gotlbc;
}
static void
eth_igbvf_stats_reset(struct rte_eth_dev *dev)
{
	struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
		E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* Sync HW register to the last stats */
	eth_igbvf_stats_get(dev, NULL);

	/* reset HW current stats */
	memset(&hw_stats->gprc, 0, sizeof(*hw_stats) -
	       offsetof(struct e1000_vf_stats, gprc));
}
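/*
 * Note (assumption about struct layout): zeroing from the "gprc" member
 * onward is meant to reset only the accumulated totals; the last_*
 * snapshot fields used by UPDATE_VF_STAT are assumed to precede gprc in
 * struct e1000_vf_stats, so subsequent deltas stay correct after the
 * sync-then-memset sequence above.
 */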
static void
eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
	dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM;

	switch (hw->mac.type) {
	case e1000_82575:
		dev_info->max_rx_queues = 4;
		dev_info->max_tx_queues = 4;
		dev_info->max_vmdq_pools = 0;
		break;
	case e1000_82576:
		dev_info->max_rx_queues = 16;
		dev_info->max_tx_queues = 16;
		dev_info->max_vmdq_pools = ETH_8_POOLS;
		dev_info->vmdq_queue_num = 16;
		break;
	case e1000_82580:
		dev_info->max_rx_queues = 8;
		dev_info->max_tx_queues = 8;
		dev_info->max_vmdq_pools = ETH_8_POOLS;
		dev_info->vmdq_queue_num = 8;
		break;
	case e1000_i350:
		dev_info->max_rx_queues = 8;
		dev_info->max_tx_queues = 8;
		dev_info->max_vmdq_pools = ETH_8_POOLS;
		dev_info->vmdq_queue_num = 8;
		break;
	case e1000_i354:
		dev_info->max_rx_queues = 8;
		dev_info->max_tx_queues = 8;
		break;
	case e1000_i210:
		dev_info->max_rx_queues = 4;
		dev_info->max_tx_queues = 4;
		dev_info->max_vmdq_pools = 0;
		break;
	case e1000_i211:
		dev_info->max_rx_queues = 2;
		dev_info->max_tx_queues = 2;
		dev_info->max_vmdq_pools = 0;
		break;
	default:
		/* Should not happen */
		break;
	}
	dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t);
	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
	dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = IGB_DEFAULT_RX_PTHRESH,
			.hthresh = IGB_DEFAULT_RX_HTHRESH,
			.wthresh = IGB_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = IGB_DEFAULT_TX_PTHRESH,
			.hthresh = IGB_DEFAULT_TX_HTHRESH,
			.wthresh = IGB_DEFAULT_TX_WTHRESH,
		},
		.txq_flags = 0,
	};
}
static void
eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
	dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
				    DEV_RX_OFFLOAD_IPV4_CKSUM |
				    DEV_RX_OFFLOAD_UDP_CKSUM |
				    DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
				    DEV_TX_OFFLOAD_IPV4_CKSUM |
				    DEV_TX_OFFLOAD_UDP_CKSUM |
				    DEV_TX_OFFLOAD_TCP_CKSUM |
				    DEV_TX_OFFLOAD_SCTP_CKSUM;
	switch (hw->mac.type) {
	case e1000_vfadapt:
		dev_info->max_rx_queues = 2;
		dev_info->max_tx_queues = 2;
		break;
	case e1000_vfadapt_i350:
		dev_info->max_rx_queues = 1;
		dev_info->max_tx_queues = 1;
		break;
	default:
		/* Should not happen */
		break;
	}

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = IGB_DEFAULT_RX_PTHRESH,
			.hthresh = IGB_DEFAULT_RX_HTHRESH,
			.wthresh = IGB_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = IGB_DEFAULT_TX_PTHRESH,
			.hthresh = IGB_DEFAULT_TX_HTHRESH,
			.wthresh = IGB_DEFAULT_TX_WTHRESH,
		},
		.txq_flags = 0,
	};
}
/* return 0 means link status changed, -1 means not changed */
static int
eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link, old;
	int link_check, count;

	link_check = 0;
	hw->mac.get_link_status = 1;

	/* possible wait-to-complete in up to 9 seconds */
	for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count++) {
		/* Read the real link status */
		switch (hw->phy.media_type) {
		case e1000_media_type_copper:
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			break;

		case e1000_media_type_fiber:
			e1000_check_for_link(hw);
			link_check = (E1000_READ_REG(hw, E1000_STATUS) &
				      E1000_STATUS_LU);
			break;

		case e1000_media_type_internal_serdes:
			e1000_check_for_link(hw);
			link_check = hw->mac.serdes_has_link;
			break;

		/* VF device is type_unknown */
		case e1000_media_type_unknown:
			eth_igbvf_link_update(hw);
			link_check = !hw->mac.get_link_status;
			break;

		default:
			break;
		}
		if (link_check || wait_to_complete == 0)
			break;
		rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL);
	}
	memset(&link, 0, sizeof(link));
	rte_igb_dev_atomic_read_link_status(dev, &link);
	old = link;

	/* Now we check if a transition has happened */
	if (link_check) {
		hw->mac.ops.get_link_up_info(hw, &link.link_speed,
					     &link.link_duplex);
		link.link_status = 1;
	} else if (!link_check) {
		link.link_speed = 0;
		link.link_duplex = 0;
		link.link_status = 0;
	}
	rte_igb_dev_atomic_write_link_status(dev, &link);

	/* not changed */
	if (old.link_status == link.link_status)
		return -1;

	/* changed */
	return 0;
}
/*
 * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means
 * that the driver is loaded.
 */
static void
igb_hw_control_acquire(struct e1000_hw *hw)
{
	uint32_t ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/*
 * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void
igb_hw_control_release(struct e1000_hw *hw)
{
	uint32_t ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	E1000_WRITE_REG(hw, E1000_CTRL_EXT,
			ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}
/*
 * Bit of a misnomer, what this really means is
 * to enable OS management of the system... aka
 * to disable special hardware management features.
 */
static void
igb_init_manageability(struct e1000_hw *hw)
{
	if (e1000_enable_mng_pass_thru(hw)) {
		uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
		uint32_t manc = E1000_READ_REG(hw, E1000_MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		/* enable receiving management packets to the host */
		manc |= E1000_MANC_EN_MNG2HOST;
		manc2h |= 1 << 5;  /* Mng Port 623 */
		manc2h |= 1 << 6;  /* Mng Port 664 */
		E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
		E1000_WRITE_REG(hw, E1000_MANC, manc);
	}
}

static void
igb_release_manageability(struct e1000_hw *hw)
{
	if (e1000_enable_mng_pass_thru(hw)) {
		uint32_t manc = E1000_READ_REG(hw, E1000_MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;
		manc &= ~E1000_MANC_EN_MNG2HOST;

		E1000_WRITE_REG(hw, E1000_MANC, manc);
	}
}
static void
eth_igb_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

static void
eth_igb_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl &= (~E1000_RCTL_UPE);
	if (dev->data->all_multicast == 1)
		rctl |= E1000_RCTL_MPE;
	else
		rctl &= (~E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

static void
eth_igb_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl |= E1000_RCTL_MPE;
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

static void
eth_igb_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	if (dev->data->promiscuous == 1)
		return; /* must remain in all_multicast mode */
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl &= (~E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}
static int
eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
			      E1000_VFTA_ENTRY_MASK);
	vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
	vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}
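/*
 * Worked example (illustrative): for vlan_id = 1234, assuming the usual
 * 32-bit VFTA entries (E1000_VFTA_ENTRY_SHIFT = 5),
 * vid_idx = 1234 >> 5 = 38 and vid_bit = 1 << (1234 & 0x1F) = 1 << 18,
 * i.e. bit 18 of VFTA[38] controls acceptance of VLAN 1234.
 */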
static void
eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg = ETHER_TYPE_VLAN;

	/* outer TPID in the high 16 bits, inner TPID stays 0x8100 */
	reg |= (tpid << 16);
	E1000_WRITE_REG(hw, E1000_VET, reg);
}
static void
igb_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* Filter Table Disable */
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_CFIEN;
	reg &= ~E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);
}

static void
igb_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t reg;
	int i;

	/* Filter Table Enable, CFI not used for packet acceptance */
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_CFIEN;
	reg |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);

	/* restore VFTA table */
	for (i = 0; i < IGB_VFTA_SIZE; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
}
static void
igb_vlan_hw_strip_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* VLAN Mode Disable */
	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg &= ~E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);
}

static void
igb_vlan_hw_strip_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* VLAN Mode Enable */
	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg |= E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);
}
static void
igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* CTRL_EXT: Extended VLAN */
	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	reg &= ~E1000_CTRL_EXT_EXTEND_VLAN;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);

	/* Update maximum packet length */
	if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
		E1000_WRITE_REG(hw, E1000_RLPML,
			dev->data->dev_conf.rxmode.max_rx_pkt_len +
						VLAN_TAG_SIZE);
}

static void
igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* CTRL_EXT: Extended VLAN */
	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	reg |= E1000_CTRL_EXT_EXTEND_VLAN;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);

	/* Update maximum packet length */
	if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
		E1000_WRITE_REG(hw, E1000_RLPML,
			dev->data->dev_conf.rxmode.max_rx_pkt_len +
						2 * VLAN_TAG_SIZE);
}
static void
eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	if (mask & ETH_VLAN_STRIP_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
			igb_vlan_hw_strip_enable(dev);
		else
			igb_vlan_hw_strip_disable(dev);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
			igb_vlan_hw_filter_enable(dev);
		else
			igb_vlan_hw_filter_disable(dev);
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
			igb_vlan_hw_extend_enable(dev);
		else
			igb_vlan_hw_extend_disable(dev);
	}
}
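/*
 * Usage note: eth_igb_start() above calls this with
 * mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | ETH_VLAN_EXTEND_MASK
 * so that all three offloads are (re)applied from dev_conf after a restart.
 */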
/*
 * It enables the interrupt mask and then enables the interrupt.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	intr->mask |= E1000_ICR_LSC;

	return 0;
}
/*
 * It reads ICR and gets interrupt causes, checks them and sets a bit flag
 * to update link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t icr;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	igb_intr_disable(hw);

	/* read-on-clear nic registers here */
	icr = E1000_READ_REG(hw, E1000_ICR);

	intr->flags = 0;
	if (icr & E1000_ICR_LSC) {
		intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
	}

	if (icr & E1000_ICR_VMMB)
		intr->flags |= E1000_FLAG_MAILBOX;

	return 0;
}
/*
 * It executes link_update after knowing an interrupt is present.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_igb_interrupt_action(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	uint32_t tctl, rctl;
	struct rte_eth_link link;
	int ret;

	if (intr->flags & E1000_FLAG_MAILBOX) {
		igb_pf_mbx_process(dev);
		intr->flags &= ~E1000_FLAG_MAILBOX;
	}

	igb_intr_enable(dev);
	rte_intr_enable(&(dev->pci_dev->intr_handle));

	if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
		intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;

		/* set get_link_status to check register later */
		hw->mac.get_link_status = 1;
		ret = eth_igb_link_update(dev, 0);

		/* check if link has changed */
		if (ret < 0)
			return 0;

		memset(&link, 0, sizeof(link));
		rte_igb_dev_atomic_read_link_status(dev, &link);
		if (link.link_status) {
			PMD_INIT_LOG(INFO,
				     " Port %d: Link Up - speed %u Mbps - %s",
				     dev->data->port_id,
				     (unsigned)link.link_speed,
				     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
				     "full-duplex" : "half-duplex");
		} else {
			PMD_INIT_LOG(INFO, " Port %d: Link Down",
				     dev->data->port_id);
		}
		PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
			     dev->pci_dev->addr.domain,
			     dev->pci_dev->addr.bus,
			     dev->pci_dev->addr.devid,
			     dev->pci_dev->addr.function);
		tctl = E1000_READ_REG(hw, E1000_TCTL);
		rctl = E1000_READ_REG(hw, E1000_RCTL);
		if (link.link_status) {
			/* enable Tx/Rx */
			tctl |= E1000_TCTL_EN;
			rctl |= E1000_RCTL_EN;
		} else {
			/* disable Tx/Rx */
			tctl &= ~E1000_TCTL_EN;
			rctl &= ~E1000_RCTL_EN;
		}
		E1000_WRITE_REG(hw, E1000_TCTL, tctl);
		E1000_WRITE_REG(hw, E1000_RCTL, rctl);
		E1000_WRITE_FLUSH(hw);
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
	}

	return 0;
}
/*
 * Interrupt handler which shall be registered at first.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
eth_igb_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
			  void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	eth_igb_interrupt_get_status(dev);
	eth_igb_interrupt_action(dev);
}
static int
eth_igb_led_on(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	return (e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
}

static int
eth_igb_led_off(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	return (e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
}
static int
eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct e1000_hw *hw;
	uint32_t ctrl;
	int tx_pause;
	int rx_pause;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	fc_conf->pause_time = hw->fc.pause_time;
	fc_conf->high_water = hw->fc.high_water;
	fc_conf->low_water = hw->fc.low_water;
	fc_conf->send_xon = hw->fc.send_xon;
	fc_conf->autoneg = hw->mac.autoneg;

	/*
	 * Return rx_pause and tx_pause status according to actual setting of
	 * the TFCE and RFCE bits in the CTRL register.
	 */
	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	if (ctrl & E1000_CTRL_TFCE)
		tx_pause = 1;
	else
		tx_pause = 0;

	if (ctrl & E1000_CTRL_RFCE)
		rx_pause = 1;
	else
		rx_pause = 0;

	if (rx_pause && tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

	return 0;
}
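/*
 * The CTRL bits map to the reported mode as follows:
 *
 *	RFCE  TFCE  fc_conf->mode
 *	 1     1    RTE_FC_FULL
 *	 1     0    RTE_FC_RX_PAUSE
 *	 0     1    RTE_FC_TX_PAUSE
 *	 0     0    RTE_FC_NONE
 */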
static int
eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct e1000_hw *hw;
	int err;
	enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
		e1000_fc_none,
		e1000_fc_rx_pause,
		e1000_fc_tx_pause,
		e1000_fc_full
	};
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	uint32_t rctl;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (fc_conf->autoneg != hw->mac.autoneg)
		return -ENOTSUP;
	rx_buf_size = igb_get_rx_buffer_size(hw);
	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);

	/* At least reserve one Ethernet frame for watermark */
	max_high_water = rx_buf_size - ETHER_MAX_LEN;
	if ((fc_conf->high_water > max_high_water) ||
	    (fc_conf->high_water < fc_conf->low_water)) {
		PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
		PMD_INIT_LOG(ERR, "high water must be <= 0x%x", max_high_water);
		return -EINVAL;
	}

	hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
	hw->fc.pause_time = fc_conf->pause_time;
	hw->fc.high_water = fc_conf->high_water;
	hw->fc.low_water = fc_conf->low_water;
	hw->fc.send_xon = fc_conf->send_xon;

	err = e1000_setup_link_generic(hw);
	if (err == E1000_SUCCESS) {

		/* check if we want to forward MAC frames - driver doesn't have native
		 * capability to do that, so we'll write the registers ourselves */

		rctl = E1000_READ_REG(hw, E1000_RCTL);

		/* set or clear RCTL.PMCF bit depending on configuration */
		if (fc_conf->mac_ctrl_frame_fwd != 0)
			rctl |= E1000_RCTL_PMCF;
		else
			rctl &= ~E1000_RCTL_PMCF;

		E1000_WRITE_REG(hw, E1000_RCTL, rctl);
		E1000_WRITE_FLUSH(hw);

		return 0;
	}

	PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
	return -EIO;
}
#define E1000_RAH_POOLSEL_SHIFT      (18)
static void
eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
		uint32_t index, uint32_t pool)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rah;

	e1000_rar_set(hw, mac_addr->addr_bytes, index);
	rah = E1000_READ_REG(hw, E1000_RAH(index));
	rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool));
	E1000_WRITE_REG(hw, E1000_RAH(index), rah);
}

static void
eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
{
	uint8_t addr[ETHER_ADDR_LEN];
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	memset(addr, 0, sizeof(addr));

	e1000_rar_set(hw, addr, index);
}

static void
eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
			     struct ether_addr *addr)
{
	eth_igb_rar_clear(dev, 0);

	eth_igb_rar_set(dev, (void *)addr, 0, 0);
}
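/*
 * Example of the pool-select encoding above (values illustrative): adding
 * an address to RAR entry 5 for pool 2 sets bit (18 + 2) = 20 of RAH(5),
 * so only that pool receives frames matching the entry. The pool-select
 * bits are in addition to the Address Valid bit, which e1000_rar_set()
 * itself manages.
 */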
/*
 * Virtual Function operations
 */
static void
igbvf_intr_disable(struct e1000_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	/* Clear interrupt mask to stop interrupts from being generated */
	E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF);

	E1000_WRITE_FLUSH(hw);
}
static void
igbvf_stop_adapter(struct rte_eth_dev *dev)
{
	u32 reg_val;
	u16 i;
	struct rte_eth_dev_info dev_info;
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	memset(&dev_info, 0, sizeof(dev_info));
	eth_igbvf_infos_get(dev, &dev_info);

	/* Clear interrupt mask to stop interrupts from being generated */
	igbvf_intr_disable(hw);

	/* Clear any pending interrupts, flush previous writes */
	E1000_READ_REG(hw, E1000_EICR);

	/* Disable the transmit unit. Each queue must be disabled. */
	for (i = 0; i < dev_info.max_tx_queues; i++)
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), E1000_TXDCTL_SWFLSH);

	/* Disable the receive unit by stopping each queue */
	for (i = 0; i < dev_info.max_rx_queues; i++) {
		reg_val = E1000_READ_REG(hw, E1000_RXDCTL(i));
		reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), reg_val);
		while (E1000_READ_REG(hw, E1000_RXDCTL(i)) & E1000_RXDCTL_QUEUE_ENABLE)
			;
	}

	/* Flush all queue disables before continuing */
	E1000_WRITE_FLUSH(hw);
	msec_delay(2);
}
static int eth_igbvf_link_update(struct e1000_hw *hw)
{
	struct e1000_mbx_info *mbx = &hw->mbx;
	struct e1000_mac_info *mac = &hw->mac;
	int ret_val = E1000_SUCCESS;

	PMD_INIT_LOG(DEBUG, "e1000_check_for_link_vf");

	/*
	 * We only want to run this if there has been a reset asserted.
	 * In this case that could mean a link change, device reset,
	 * or a virtual function reset.
	 */

	/* If we were hit with a reset or timeout drop the link */
	if (!e1000_check_for_rst(hw, 0) || !mbx->timeout)
		mac->get_link_status = TRUE;

	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
		goto out;

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link */
	mac->get_link_status = FALSE;

out:
	return ret_val;
}
static int
igbvf_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *conf = &dev->data->dev_conf;

	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
		     dev->data->port_id);

	/*
	 * VF has no ability to enable/disable HW CRC
	 * Keep the persistent behavior the same as Host PF
	 */
#ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
	if (!conf->rxmode.hw_strip_crc) {
		PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip");
		conf->rxmode.hw_strip_crc = 1;
	}
#else
	if (conf->rxmode.hw_strip_crc) {
		PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip");
		conf->rxmode.hw_strip_crc = 0;
	}
#endif

	return 0;
}
static int
igbvf_dev_start(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	PMD_INIT_FUNC_TRACE();

	hw->mac.ops.reset_hw(hw);

	/* Set all vfta */
	igbvf_set_vfta_all(dev, 1);

	eth_igbvf_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	ret = eth_igbvf_rx_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		igb_dev_clear_queues(dev);
		return ret;
	}

	return 0;
}
static void
igbvf_dev_stop(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	igbvf_stop_adapter(dev);

	/*
	 * Clear what we set, but we still keep shadow_vfta to
	 * restore after device starts
	 */
	igbvf_set_vfta_all(dev, 0);

	igb_dev_clear_queues(dev);
}

static void
igbvf_dev_close(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	e1000_reset_hw(hw);

	igbvf_dev_stop(dev);
}
static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on)
{
	struct e1000_mbx_info *mbx = &hw->mbx;
	uint32_t msgbuf[2];

	/* After set vlan, vlan strip will also be enabled in igb driver */
	msgbuf[0] = E1000_VF_SET_VLAN;
	msgbuf[1] = vid;
	/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
	if (on)
		msgbuf[0] |= E1000_VF_SET_VLAN_ADD;

	return (mbx->ops.write_posted(hw, msgbuf, 2, 0));
}
static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	int i = 0, j = 0, vfta = 0, mask = 1;

	for (i = 0; i < IGB_VFTA_SIZE; i++) {
		vfta = shadow_vfta->vfta[i];
		if (vfta) {
			mask = 1;
			for (j = 0; j < 32; j++) {
				if (vfta & mask)
					igbvf_set_vfta(hw,
						(uint16_t)((i << 5) + j), on);
				mask <<= 1;
			}
		}
	}
}
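/*
 * Worked example for the (i << 5) + j mapping above: word i = 3, bit j = 5
 * corresponds to VLAN ID (3 << 5) + 5 = 101; each of the IGB_VFTA_SIZE
 * shadow words therefore tracks 32 consecutive VLAN IDs.
 */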
static int
igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t vid_idx = 0;
	uint32_t vid_bit = 0;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();

	/* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
	ret = igbvf_set_vfta(hw, vlan_id, !!on);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to set VF vlan");
		return ret;
	}
	vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));

	/* Save what we set and restore it after device reset */
	if (on)
		shadow_vfta->vfta[vid_idx] |= vid_bit;
	else
		shadow_vfta->vfta[vid_idx] &= ~vid_bit;

	return 0;
}
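/*
 * Example of the index math above: vlan_id = 100 gives
 * vid_idx = (100 >> 5) & 0x7F = 3 and vid_bit = 1 << (100 & 0x1F) = 1 << 4,
 * so bit 4 of shadow_vfta->vfta[3] mirrors the hardware state for VLAN 100.
 */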
static void
igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* index is not used by rar_set() */
	hw->mac.ops.rar_set(hw, (void *)addr, 0);
}
static int
eth_igb_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	uint8_t i, j, mask;
	uint32_t reta, r;
	uint16_t idx, shift;
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can support "
			"(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
						IGB_4_BIT_MASK);
		if (!mask)
			continue;
		if (mask == IGB_4_BIT_MASK)
			r = 0;
		else
			r = E1000_READ_REG(hw, E1000_RETA(i >> 2));
		for (j = 0, reta = 0; j < IGB_4_BIT_WIDTH; j++) {
			if (mask & (0x1 << j))
				reta |= reta_conf[idx].reta[shift + j] <<
							(CHAR_BIT * j);
			else
				reta |= r & (IGB_8_BIT_MASK << (CHAR_BIT * j));
		}
		E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta);
	}

	return 0;
}
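/*
 * RETA layout used above: each 32-bit RETA register holds four one-byte
 * entries, so redirection entry n lives in byte (n % 4) of register
 * E1000_RETA(n / 4); e.g. entry 6 is byte 2 of RETA(1). A group whose
 * 4-bit mask is all ones is written without the read-modify-write step,
 * since every byte of the register is being replaced anyway.
 */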
static int
eth_igb_rss_reta_query(struct rte_eth_dev *dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
		       uint16_t reta_size)
{
	uint8_t i, j, mask;
	uint32_t reta;
	uint16_t idx, shift;
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can support "
			"(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
						IGB_4_BIT_MASK);
		if (!mask)
			continue;
		reta = E1000_READ_REG(hw, E1000_RETA(i >> 2));
		for (j = 0; j < IGB_4_BIT_WIDTH; j++) {
			if (mask & (0x1 << j))
				reta_conf[idx].reta[shift + j] =
					((reta >> (CHAR_BIT * j)) &
						IGB_8_BIT_MASK);
		}
	}

	return 0;
}
#define MAC_TYPE_FILTER_SUP(type)    do {\
	if ((type) != e1000_82580 && (type) != e1000_i350 &&\
		(type) != e1000_82576)\
		return -ENOTSUP;\
} while (0)
static int
eth_igb_syn_filter_set(struct rte_eth_dev *dev,
			struct rte_eth_syn_filter *filter,
			bool add)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t synqf, rfctl;

	if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
		return -EINVAL;

	synqf = E1000_READ_REG(hw, E1000_SYNQF(0));

	if (add) {
		if (synqf & E1000_SYN_FILTER_ENABLE)
			return -EINVAL;

		synqf = (uint32_t)(((filter->queue << E1000_SYN_FILTER_QUEUE_SHIFT) &
			E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE);

		rfctl = E1000_READ_REG(hw, E1000_RFCTL);
		if (filter->hig_pri)
			rfctl |= E1000_RFCTL_SYNQFP;
		else
			rfctl &= ~E1000_RFCTL_SYNQFP;

		E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
	} else {
		if (!(synqf & E1000_SYN_FILTER_ENABLE))
			return -ENOENT;
		synqf &= ~(E1000_SYN_FILTER_QUEUE | E1000_SYN_FILTER_ENABLE);
	}

	E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
	E1000_WRITE_FLUSH(hw);
	return 0;
}
static int
eth_igb_syn_filter_get(struct rte_eth_dev *dev,
			struct rte_eth_syn_filter *filter)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t synqf, rfctl;

	synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
	if (synqf & E1000_SYN_FILTER_ENABLE) {
		rfctl = E1000_READ_REG(hw, E1000_RFCTL);
		filter->hig_pri = (rfctl & E1000_RFCTL_SYNQFP) ? 1 : 0;
		filter->queue = (uint8_t)((synqf & E1000_SYN_FILTER_QUEUE) >>
			E1000_SYN_FILTER_QUEUE_SHIFT);
		return 0;
	}

	return -ENOENT;
}
static int
eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL) {
		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
			    filter_op);
		return -EINVAL;
	}

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = eth_igb_syn_filter_set(dev,
				(struct rte_eth_syn_filter *)arg,
				TRUE);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = eth_igb_syn_filter_set(dev,
				(struct rte_eth_syn_filter *)arg,
				FALSE);
		break;
	case RTE_ETH_FILTER_GET:
		ret = eth_igb_syn_filter_get(dev,
				(struct rte_eth_syn_filter *)arg);
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
		ret = -EINVAL;
		break;
	}

	return ret;
}
#define MAC_TYPE_FILTER_SUP_EXT(type)    do {\
	if ((type) != e1000_82580 && (type) != e1000_i350)\
		return -ENOTSUP;\
} while (0)
/* translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info */
static inline int
ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter,
			struct e1000_2tuple_filter_info *filter_info)
{
	if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
		return -EINVAL;
	if (filter->priority > E1000_2TUPLE_MAX_PRI)
		return -EINVAL;  /* priority is out of range. */
	if (filter->tcp_flags > TCP_FLAG_ALL)
		return -EINVAL;  /* flags are invalid. */

	switch (filter->dst_port_mask) {
	case UINT16_MAX:
		filter_info->dst_port_mask = 0;
		filter_info->dst_port = filter->dst_port;
		break;
	case 0:
		filter_info->dst_port_mask = 1;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid dst_port mask.");
		return -EINVAL;
	}

	switch (filter->proto_mask) {
	case UINT8_MAX:
		filter_info->proto_mask = 0;
		filter_info->proto = filter->proto;
		break;
	case 0:
		filter_info->proto_mask = 1;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid protocol mask.");
		return -EINVAL;
	}

	filter_info->priority = (uint8_t)filter->priority;
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
		filter_info->tcp_flags = filter->tcp_flags;
	else
		filter_info->tcp_flags = 0;

	return 0;
}
static inline struct e1000_2tuple_filter *
igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list,
			struct e1000_2tuple_filter_info *key)
{
	struct e1000_2tuple_filter *it;

	TAILQ_FOREACH(it, filter_list, entries) {
		if (memcmp(key, &it->filter_info,
			sizeof(struct e1000_2tuple_filter_info)) == 0) {
			return it;
		}
	}
	return NULL;
}
/*
 * igb_add_2tuple_filter - add a 2tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: pointer to the filter that will be added.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
igb_add_2tuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_2tuple_filter *filter;
	uint32_t ttqf = E1000_TTQF_DISABLE_MASK;
	uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP;
	int i, ret;

	filter = rte_zmalloc("e1000_2tuple_filter",
			sizeof(struct e1000_2tuple_filter), 0);
	if (filter == NULL)
		return -ENOMEM;

	ret = ntuple_filter_to_2tuple(ntuple_filter,
				      &filter->filter_info);
	if (ret < 0) {
		rte_free(filter);
		return ret;
	}
	if (igb_2tuple_filter_lookup(&filter_info->twotuple_list,
					 &filter->filter_info) != NULL) {
		PMD_DRV_LOG(ERR, "filter exists.");
		rte_free(filter);
		return -EEXIST;
	}
	filter->queue = ntuple_filter->queue;

	/*
	 * look for an unused 2tuple filter index,
	 * and insert the filter to list.
	 */
	for (i = 0; i < E1000_MAX_TTQF_FILTERS; i++) {
		if (!(filter_info->twotuple_mask & (1 << i))) {
			filter_info->twotuple_mask |= 1 << i;
			filter->index = i;
			TAILQ_INSERT_TAIL(&filter_info->twotuple_list,
					  filter,
					  entries);
			break;
		}
	}
	if (i >= E1000_MAX_TTQF_FILTERS) {
		PMD_DRV_LOG(ERR, "2tuple filters are full.");
		rte_free(filter);
		return -ENOSYS;
	}

	imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
	if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
		imir |= E1000_IMIR_PORT_BP;
	else
		imir &= ~E1000_IMIR_PORT_BP;

	imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;

	ttqf |= E1000_TTQF_QUEUE_ENABLE;
	ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT);
	ttqf |= (uint32_t)(filter->filter_info.proto & E1000_TTQF_PROTOCOL_MASK);
	if (filter->filter_info.proto_mask == 0)
		ttqf &= ~E1000_TTQF_MASK_ENABLE;

	/* tcp flags bits setting. */
	if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
		if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_URG;
		if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_ACK;
		if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_PSH;
		if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_RST;
		if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_SYN;
		if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_FIN;
	} else
		imir_ext |= E1000_IMIREXT_CTRL_BP;
	E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
	E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf);
	E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
	return 0;
}
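/*
 * Mask convention used by the IMIR/TTQF programming above (inherited from
 * ntuple_filter_to_2tuple()): a *_mask field of 1 means "do not compare",
 * i.e. the bypass bit is set, while 0 means the corresponding field must
 * match. For example, dst_port_mask == UINT16_MAX in the rte filter
 * becomes filter_info->dst_port_mask == 0, an exact dst-port match.
 */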
/*
 * igb_remove_2tuple_filter - remove a 2tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: pointer to the filter that will be removed.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
igb_remove_2tuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_2tuple_filter_info filter_2tuple;
	struct e1000_2tuple_filter *filter;
	int ret;

	memset(&filter_2tuple, 0, sizeof(struct e1000_2tuple_filter_info));
	ret = ntuple_filter_to_2tuple(ntuple_filter,
				      &filter_2tuple);
	if (ret < 0)
		return ret;

	filter = igb_2tuple_filter_lookup(&filter_info->twotuple_list,
					  &filter_2tuple);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "filter doesn't exist.");
		return -ENOENT;
	}

	filter_info->twotuple_mask &= ~(1 << filter->index);
	TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries);

	E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK);
	E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
	E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
	rte_free(filter);
	return 0;
}
static inline struct e1000_flex_filter *
eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list,
			struct e1000_flex_filter_info *key)
{
	struct e1000_flex_filter *it;

	TAILQ_FOREACH(it, filter_list, entries) {
		if (memcmp(key, &it->filter_info,
			sizeof(struct e1000_flex_filter_info)) == 0)
			return it;
	}

	return NULL;
}
static int
eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
			struct rte_eth_flex_filter *filter,
			bool add)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_flex_filter *flex_filter, *it;
	uint32_t wufc, queueing, mask;
	uint32_t reg_off;
	uint8_t shift, i, j = 0;

	flex_filter = rte_zmalloc("e1000_flex_filter",
			sizeof(struct e1000_flex_filter), 0);
	if (flex_filter == NULL)
		return -ENOMEM;

	flex_filter->filter_info.len = filter->len;
	flex_filter->filter_info.priority = filter->priority;
	memcpy(flex_filter->filter_info.dwords, filter->bytes, filter->len);
	for (i = 0; i < RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT; i++) {
		mask = 0;
		/* reverse bits in flex filter's mask */
		for (shift = 0; shift < CHAR_BIT; shift++) {
			if (filter->mask[i] & (0x01 << shift))
				mask |= (0x80 >> shift);
		}
		flex_filter->filter_info.mask[i] = mask;
	}

	wufc = E1000_READ_REG(hw, E1000_WUFC);
	if (flex_filter->index < E1000_MAX_FHFT)
		reg_off = E1000_FHFT(flex_filter->index);
	else
		reg_off = E1000_FHFT_EXT(flex_filter->index - E1000_MAX_FHFT);

	if (add) {
		if (eth_igb_flex_filter_lookup(&filter_info->flex_list,
				&flex_filter->filter_info) != NULL) {
			PMD_DRV_LOG(ERR, "filter exists.");
			rte_free(flex_filter);
			return -EEXIST;
		}
		flex_filter->queue = filter->queue;
		/*
		 * look for an unused flex filter index
		 * and insert the filter into the list.
		 */
		for (i = 0; i < E1000_MAX_FLEX_FILTERS; i++) {
			if (!(filter_info->flex_mask & (1 << i))) {
				filter_info->flex_mask |= 1 << i;
				flex_filter->index = i;
				TAILQ_INSERT_TAIL(&filter_info->flex_list,
					flex_filter,
					entries);
				break;
			}
		}
		if (i >= E1000_MAX_FLEX_FILTERS) {
			PMD_DRV_LOG(ERR, "flex filters are full.");
			rte_free(flex_filter);
			return -ENOSYS;
		}

		E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
				(E1000_WUFC_FLX0 << flex_filter->index));
		queueing = filter->len |
			(filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
			(filter->priority << E1000_FHFT_QUEUEING_PRIO_SHIFT);
		E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET,
				queueing);
		for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) {
			E1000_WRITE_REG(hw, reg_off,
					flex_filter->filter_info.dwords[j]);
			reg_off += sizeof(uint32_t);
			E1000_WRITE_REG(hw, reg_off,
					flex_filter->filter_info.dwords[++j]);
			reg_off += sizeof(uint32_t);
			E1000_WRITE_REG(hw, reg_off,
				(uint32_t)flex_filter->filter_info.mask[i]);
			reg_off += sizeof(uint32_t) * 2;
			++j;
		}
	} else {
		it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
				&flex_filter->filter_info);
		if (it == NULL) {
			PMD_DRV_LOG(ERR, "filter doesn't exist.");
			rte_free(flex_filter);
			return -ENOENT;
		}

		for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
			E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);
		E1000_WRITE_REG(hw, E1000_WUFC, wufc &
			(~(E1000_WUFC_FLX0 << it->index)));

		filter_info->flex_mask &= ~(1 << it->index);
		TAILQ_REMOVE(&filter_info->flex_list, it, entries);
		rte_free(it);
		rte_free(flex_filter);
	}

	return 0;
}
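/*
 * FHFT layout assumed by the write loop above: the table is programmed in
 * rows of four dwords - two dwords (8 bytes) of pattern data, one dword
 * holding the 8 mask bits covering those bytes, and one reserved dword
 * skipped via the final "reg_off += sizeof(uint32_t) * 2". The mask bits
 * are bit-reversed beforehand, apparently because the hardware consumes
 * them in the opposite bit order: a software mask of 0x01 (match byte 0)
 * is written as 0x80.
 */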
static int
eth_igb_get_flex_filter(struct rte_eth_dev *dev,
			struct rte_eth_flex_filter *filter)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_flex_filter flex_filter, *it;
	uint32_t wufc, queueing, wufc_en = 0;

	memset(&flex_filter, 0, sizeof(struct e1000_flex_filter));
	flex_filter.filter_info.len = filter->len;
	flex_filter.filter_info.priority = filter->priority;
	memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len);
	memcpy(flex_filter.filter_info.mask, filter->mask,
	       RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT);

	it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
					&flex_filter.filter_info);
	if (it == NULL) {
		PMD_DRV_LOG(ERR, "filter doesn't exist.");
		return -ENOENT;
	}

	wufc = E1000_READ_REG(hw, E1000_WUFC);
	wufc_en = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << it->index);

	if ((wufc & wufc_en) == wufc_en) {
		uint32_t reg_off = 0;
		if (it->index < E1000_MAX_FHFT)
			reg_off = E1000_FHFT(it->index);
		else
			reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);

		queueing = E1000_READ_REG(hw,
				reg_off + E1000_FHFT_QUEUEING_OFFSET);
		filter->len = queueing & E1000_FHFT_QUEUEING_LEN;
		filter->priority = (queueing & E1000_FHFT_QUEUEING_PRIO) >>
			E1000_FHFT_QUEUEING_PRIO_SHIFT;
		filter->queue = (queueing & E1000_FHFT_QUEUEING_QUEUE) >>
			E1000_FHFT_QUEUEING_QUEUE_SHIFT;
		return 0;
	}
	return -ENOENT;
}
static int
eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_flex_filter *filter;
	int ret = 0;

	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

	if (filter_op == RTE_ETH_FILTER_NOP)
		return ret;

	if (arg == NULL) {
		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
			    filter_op);
		return -EINVAL;
	}

	filter = (struct rte_eth_flex_filter *)arg;
	if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN
	    || filter->len % sizeof(uint64_t) != 0) {
		PMD_DRV_LOG(ERR, "filter's length is out of range");
		return -EINVAL;
	}
	if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
		PMD_DRV_LOG(ERR, "filter's priority is out of range");
		return -EINVAL;
	}

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = eth_igb_add_del_flex_filter(dev, filter, TRUE);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = eth_igb_add_del_flex_filter(dev, filter, FALSE);
		break;
	case RTE_ETH_FILTER_GET:
		ret = eth_igb_get_flex_filter(dev, filter);
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
		ret = -EINVAL;
		break;
	}

	return ret;
}
/* translate elements in struct rte_eth_ntuple_filter to struct e1000_5tuple_filter_info */
static inline int
ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter,
			struct e1000_5tuple_filter_info *filter_info)
{
	if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576)
		return -EINVAL;
	if (filter->priority > E1000_2TUPLE_MAX_PRI)
		return -EINVAL;  /* priority is out of range. */
	if (filter->tcp_flags > TCP_FLAG_ALL)
		return -EINVAL;  /* flags are invalid. */

	switch (filter->dst_ip_mask) {
	case UINT32_MAX:
		filter_info->dst_ip_mask = 0;
		filter_info->dst_ip = filter->dst_ip;
		break;
	case 0:
		filter_info->dst_ip_mask = 1;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
		return -EINVAL;
	}

	switch (filter->src_ip_mask) {
	case UINT32_MAX:
		filter_info->src_ip_mask = 0;
		filter_info->src_ip = filter->src_ip;
		break;
	case 0:
		filter_info->src_ip_mask = 1;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid src_ip mask.");
		return -EINVAL;
	}

	switch (filter->dst_port_mask) {
	case UINT16_MAX:
		filter_info->dst_port_mask = 0;
		filter_info->dst_port = filter->dst_port;
		break;
	case 0:
		filter_info->dst_port_mask = 1;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid dst_port mask.");
		return -EINVAL;
	}

	switch (filter->src_port_mask) {
	case UINT16_MAX:
		filter_info->src_port_mask = 0;
		filter_info->src_port = filter->src_port;
		break;
	case 0:
		filter_info->src_port_mask = 1;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid src_port mask.");
		return -EINVAL;
	}

	switch (filter->proto_mask) {
	case UINT8_MAX:
		filter_info->proto_mask = 0;
		filter_info->proto = filter->proto;
		break;
	case 0:
		filter_info->proto_mask = 1;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid protocol mask.");
		return -EINVAL;
	}

	filter_info->priority = (uint8_t)filter->priority;
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
		filter_info->tcp_flags = filter->tcp_flags;
	else
		filter_info->tcp_flags = 0;

	return 0;
}
static inline struct e1000_5tuple_filter *
igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list,
			struct e1000_5tuple_filter_info *key)
{
	struct e1000_5tuple_filter *it;

	TAILQ_FOREACH(it, filter_list, entries) {
		if (memcmp(key, &it->filter_info,
			sizeof(struct e1000_5tuple_filter_info)) == 0) {
			return it;
		}
	}
	return NULL;
}
/*
 * igb_add_5tuple_filter_82576 - add a 5tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: pointer to the filter that will be added.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_5tuple_filter *filter;
	uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK;
	uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP;
	uint8_t i;
	int ret;

	filter = rte_zmalloc("e1000_5tuple_filter",
			sizeof(struct e1000_5tuple_filter), 0);
	if (filter == NULL)
		return -ENOMEM;

	ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
					    &filter->filter_info);
	if (ret < 0) {
		rte_free(filter);
		return ret;
	}

	if (igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
					   &filter->filter_info) != NULL) {
		PMD_DRV_LOG(ERR, "filter exists.");
		rte_free(filter);
		return -EEXIST;
	}
	filter->queue = ntuple_filter->queue;

	/*
	 * look for an unused 5tuple filter index,
	 * and insert the filter to list.
	 */
	for (i = 0; i < E1000_MAX_FTQF_FILTERS; i++) {
		if (!(filter_info->fivetuple_mask & (1 << i))) {
			filter_info->fivetuple_mask |= 1 << i;
			filter->index = i;
			TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
					  filter,
					  entries);
			break;
		}
	}
	if (i >= E1000_MAX_FTQF_FILTERS) {
		PMD_DRV_LOG(ERR, "5tuple filters are full.");
		rte_free(filter);
		return -ENOSYS;
	}

	ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK;
	if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. */
		ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP;
	if (filter->filter_info.dst_ip_mask == 0)
		ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP;
	if (filter->filter_info.src_port_mask == 0)
		ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
	if (filter->filter_info.proto_mask == 0)
		ftqf &= ~E1000_FTQF_MASK_PROTO_BP;
	ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) &
		E1000_FTQF_QUEUE_MASK;
	ftqf |= E1000_FTQF_QUEUE_ENABLE;
	E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf);
	E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip);
	E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip);

	spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT;
	E1000_WRITE_REG(hw, E1000_SPQF(i), spqf);

	imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
	if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
		imir |= E1000_IMIR_PORT_BP;
	else
		imir &= ~E1000_IMIR_PORT_BP;
	imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;

	/* tcp flags bits setting. */
	if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
		if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_URG;
		if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_ACK;
		if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_PSH;
		if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_RST;
		if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_SYN;
		if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_FIN;
	} else
		imir_ext |= E1000_IMIREXT_CTRL_BP;
	E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
	E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
	return 0;
}
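/*
 * Note on the FTQF programming above: the register starts from
 * E1000_FTQF_MASK with every compare bypassed, and each *_BP bit is then
 * *cleared* when the corresponding field should participate in the match;
 * e.g. src_ip_mask == 0 clears the source-address bypass so the source IP
 * is compared against SAQF.
 */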
/*
 * igb_remove_5tuple_filter_82576 - remove a 5tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: pointer to the filter that will be removed.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_5tuple_filter_info filter_5tuple;
	struct e1000_5tuple_filter *filter;
	int ret;

	memset(&filter_5tuple, 0, sizeof(struct e1000_5tuple_filter_info));
	ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
					    &filter_5tuple);
	if (ret < 0)
		return ret;

	filter = igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
						&filter_5tuple);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "filter doesn't exist.");
		return -ENOENT;
	}

	filter_info->fivetuple_mask &= ~(1 << filter->index);
	TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);

	E1000_WRITE_REG(hw, E1000_FTQF(filter->index),
			E1000_FTQF_VF_BP | E1000_FTQF_MASK);
	E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0);
	E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0);
	E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0);
	E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
	E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
	rte_free(filter);
	return 0;
}
static int
eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	uint32_t rctl;
	struct e1000_hw *hw;
	struct rte_eth_dev_info dev_info;
	uint32_t frame_size = mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN +
				     VLAN_TAG_SIZE);

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

#ifdef RTE_LIBRTE_82571_SUPPORT
	/* XXX: not bigger than max_rx_pktlen */
	if (hw->mac.type == e1000_82571)
		return -ENOTSUP;
#endif
	eth_igb_infos_get(dev, &dev_info);

	/* check that mtu is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) ||
	    (frame_size > dev_info.max_rx_pktlen))
		return -EINVAL;

	/* refuse mtu that requires the support of scattered packets when this
	 * feature has not been enabled before. */
	if (!dev->data->scattered_rx &&
	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
		return -EINVAL;

	rctl = E1000_READ_REG(hw, E1000_RCTL);

	/* switch to jumbo mode if needed */
	if (frame_size > ETHER_MAX_LEN) {
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
		rctl |= E1000_RCTL_LPE;
	} else {
		dev->data->dev_conf.rxmode.jumbo_frame = 0;
		rctl &= ~E1000_RCTL_LPE;
	}
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	E1000_WRITE_REG(hw, E1000_RLPML,
			dev->data->dev_conf.rxmode.max_rx_pkt_len);

	return 0;
}
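/*
 * Example frame-size arithmetic, assuming ETHER_HDR_LEN = 14,
 * ETHER_CRC_LEN = 4 and VLAN_TAG_SIZE = 4: mtu = 9000 gives
 * frame_size = 9000 + 22 = 9022, which exceeds ETHER_MAX_LEN, so jumbo
 * mode (RCTL.LPE) is enabled and RLPML is raised accordingly.
 */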
/*
 * igb_add_del_ntuple_filter - add or delete a ntuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
 * add: if true, add filter, if false, remove filter
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter,
			bool add)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	switch (ntuple_filter->flags) {
	case RTE_5TUPLE_FLAGS:
	case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
		if (hw->mac.type != e1000_82576)
			return -ENOTSUP;
		if (add)
			ret = igb_add_5tuple_filter_82576(dev,
							  ntuple_filter);
		else
			ret = igb_remove_5tuple_filter_82576(dev,
							     ntuple_filter);
		break;
	case RTE_2TUPLE_FLAGS:
	case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
		if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
			return -ENOTSUP;
		if (add)
			ret = igb_add_2tuple_filter(dev, ntuple_filter);
		else
			ret = igb_remove_2tuple_filter(dev, ntuple_filter);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
/*
 * igb_get_ntuple_filter - get a ntuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
igb_get_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_5tuple_filter_info filter_5tuple;
	struct e1000_2tuple_filter_info filter_2tuple;
	struct e1000_5tuple_filter *p_5tuple_filter;
	struct e1000_2tuple_filter *p_2tuple_filter;
	int ret;

	switch (ntuple_filter->flags) {
	case RTE_5TUPLE_FLAGS:
	case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
		if (hw->mac.type != e1000_82576)
			return -ENOTSUP;
		memset(&filter_5tuple,
		       0,
		       sizeof(struct e1000_5tuple_filter_info));
		ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
						    &filter_5tuple);
		if (ret < 0)
			return ret;
		p_5tuple_filter = igb_5tuple_filter_lookup_82576(
					&filter_info->fivetuple_list,
					&filter_5tuple);
		if (p_5tuple_filter == NULL) {
			PMD_DRV_LOG(ERR, "filter doesn't exist.");
			return -ENOENT;
		}
		ntuple_filter->queue = p_5tuple_filter->queue;
		break;
	case RTE_2TUPLE_FLAGS:
	case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
		if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
			return -ENOTSUP;
		memset(&filter_2tuple,
		       0,
		       sizeof(struct e1000_2tuple_filter_info));
		ret = ntuple_filter_to_2tuple(ntuple_filter, &filter_2tuple);
		if (ret < 0)
			return ret;
		p_2tuple_filter = igb_2tuple_filter_lookup(
					&filter_info->twotuple_list,
					&filter_2tuple);
		if (p_2tuple_filter == NULL) {
			PMD_DRV_LOG(ERR, "filter doesn't exist.");
			return -ENOENT;
		}
		ntuple_filter->queue = p_2tuple_filter->queue;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
/*
 * igb_ntuple_filter_handle - Handle operations for ntuple filter.
 * @dev: pointer to rte_eth_dev structure
 * @filter_op: operation that will be taken.
 * @arg: a pointer to specific structure corresponding to the filter_op
 */
static int
igb_ntuple_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL) {
		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
			    filter_op);
		return -EINVAL;
	}

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = igb_add_del_ntuple_filter(dev,
			(struct rte_eth_ntuple_filter *)arg,
			TRUE);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = igb_add_del_ntuple_filter(dev,
			(struct rte_eth_ntuple_filter *)arg,
			FALSE);
		break;
	case RTE_ETH_FILTER_GET:
		ret = igb_get_ntuple_filter(dev,
			(struct rte_eth_ntuple_filter *)arg);
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
		ret = -EINVAL;
		break;
	}

	return ret;
}
static inline int
igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info,
			uint16_t ethertype)
{
	int i;

	for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
		if (filter_info->ethertype_filters[i] == ethertype &&
		    (filter_info->ethertype_mask & (1 << i)))
			return i;
	}
	return -1;
}

static inline int
igb_ethertype_filter_insert(struct e1000_filter_info *filter_info,
			uint16_t ethertype)
{
	int i;

	for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
		if (!(filter_info->ethertype_mask & (1 << i))) {
			filter_info->ethertype_mask |= 1 << i;
			filter_info->ethertype_filters[i] = ethertype;
			return i;
		}
	}
	return -1;
}

static inline int
igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
			uint8_t idx)
{
	if (idx >= E1000_MAX_ETQF_FILTERS)
		return -1;
	filter_info->ethertype_mask &= ~(1 << idx);
	filter_info->ethertype_filters[idx] = 0;
	return idx;
}
static int
igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter,
			bool add)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	uint32_t etqf = 0;
	int ret;

	if (filter->ether_type == ETHER_TYPE_IPv4 ||
	    filter->ether_type == ETHER_TYPE_IPv6) {
		PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
			" ethertype filter.", filter->ether_type);
		return -EINVAL;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		PMD_DRV_LOG(ERR, "mac compare is unsupported.");
		return -EINVAL;
	}
	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		PMD_DRV_LOG(ERR, "drop option is unsupported.");
		return -EINVAL;
	}

	ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
	if (ret >= 0 && add) {
		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
			    filter->ether_type);
		return -EEXIST;
	}
	if (ret < 0 && !add) {
		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
			    filter->ether_type);
		return -ENOENT;
	}

	if (add) {
		ret = igb_ethertype_filter_insert(filter_info,
			filter->ether_type);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "ethertype filters are full.");
			return -ENOSYS;
		}

		etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
		etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE);
		etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT;
	} else {
		ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret);
		if (ret < 0)
			return -ENOSYS;
	}
	E1000_WRITE_REG(hw, E1000_ETQF(ret), etqf);
	E1000_WRITE_FLUSH(hw);

	return 0;
}
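/*
 * Example ETQF encoding produced above (values illustrative): for
 * ether_type 0x88F7 steered to queue 3, the EtherType occupies the low
 * bits masked by E1000_ETQF_ETHERTYPE, the queue number is shifted by
 * E1000_ETQF_QUEUE_SHIFT, and both the filter-enable and queue-enable
 * bits are set. IPv4/IPv6 EtherTypes are rejected earlier, since matching
 * them in ETQF would conflict with normal L3 receive processing.
 */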
static int
igb_get_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	uint32_t etqf;
	int ret;

	ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
			    filter->ether_type);
		return -ENOENT;
	}

	etqf = E1000_READ_REG(hw, E1000_ETQF(ret));
	if (etqf & E1000_ETQF_FILTER_ENABLE) {
		filter->ether_type = etqf & E1000_ETQF_ETHERTYPE;
		filter->flags = 0;
		filter->queue = (etqf & E1000_ETQF_QUEUE) >>
				E1000_ETQF_QUEUE_SHIFT;
		return 0;
	}

	return -ENOENT;
}
/*
 * igb_ethertype_filter_handle - Handle operations for ethertype filter.
 * @dev: pointer to rte_eth_dev structure
 * @filter_op: operation that will be taken.
 * @arg: a pointer to specific structure corresponding to the filter_op
 */
static int
igb_ethertype_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL) {
		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
			    filter_op);
		return -EINVAL;
	}

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = igb_add_del_ethertype_filter(dev,
			(struct rte_eth_ethertype_filter *)arg,
			TRUE);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = igb_add_del_ethertype_filter(dev,
			(struct rte_eth_ethertype_filter *)arg,
			FALSE);
		break;
	case RTE_ETH_FILTER_GET:
		ret = igb_get_ethertype_filter(dev,
			(struct rte_eth_ethertype_filter *)arg);
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int
eth_igb_filter_ctrl(struct rte_eth_dev *dev,
		    enum rte_filter_type filter_type,
		    enum rte_filter_op filter_op,
		    void *arg)
{
	int ret = -EINVAL;

	switch (filter_type) {
	case RTE_ETH_FILTER_NTUPLE:
		ret = igb_ntuple_filter_handle(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		ret = igb_ethertype_filter_handle(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_SYN:
		ret = eth_igb_syn_filter_handle(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_FLEXIBLE:
		ret = eth_igb_flex_filter_handle(dev, filter_op, arg);
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		break;
	}

	return ret;
}
static int
eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
			 struct ether_addr *mc_addr_set,
			 uint32_t nb_mc_addr)
{
	struct e1000_hw *hw;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
	return 0;
}
static int
igb_timesync_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t tsync_ctl;

	/* Start incrementing the register used to timestamp PTP packets. */
	E1000_WRITE_REG(hw, E1000_TIMINCA, E1000_TIMINCA_INIT);

	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
	E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588),
			(ETHER_TYPE_1588 |
			 E1000_ETQF_FILTER_ENABLE |
			 E1000_ETQF_1588));

	/* Enable timestamping of received PTP packets. */
	tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
	tsync_ctl |= E1000_TSYNCRXCTL_ENABLED;
	E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);

	/* Enable timestamping of transmitted PTP packets. */
	tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
	tsync_ctl |= E1000_TSYNCTXCTL_ENABLED;
	E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);

	return 0;
}
static int
igb_timesync_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t tsync_ctl;

	/* Disable timestamping of transmitted PTP packets. */
	tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
	tsync_ctl &= ~E1000_TSYNCTXCTL_ENABLED;
	E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);

	/* Disable timestamping of received PTP packets. */
	tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
	tsync_ctl &= ~E1000_TSYNCRXCTL_ENABLED;
	E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);

	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
	E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), 0);

	/* Stop incrementing the System Time registers. */
	E1000_WRITE_REG(hw, E1000_TIMINCA, 0);

	return 0;
}
static int
igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
			       struct timespec *timestamp,
			       uint32_t flags __rte_unused)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t tsync_rxctl;
	uint32_t rx_stmpl;
	uint32_t rx_stmph;

	tsync_rxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
	if ((tsync_rxctl & E1000_TSYNCRXCTL_VALID) == 0)
		return -EINVAL;

	rx_stmpl = E1000_READ_REG(hw, E1000_RXSTMPL);
	rx_stmph = E1000_READ_REG(hw, E1000_RXSTMPH);

	timestamp->tv_sec = (uint64_t)(((uint64_t)rx_stmph << 32) | rx_stmpl);
	timestamp->tv_nsec = 0;

	return 0;
}
static int
igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
			       struct timespec *timestamp)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t tsync_txctl;
	uint32_t tx_stmpl;
	uint32_t tx_stmph;

	tsync_txctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
	if ((tsync_txctl & E1000_TSYNCTXCTL_VALID) == 0)
		return -EINVAL;

	tx_stmpl = E1000_READ_REG(hw, E1000_TXSTMPL);
	tx_stmph = E1000_READ_REG(hw, E1000_TXSTMPH);

	timestamp->tv_sec = (uint64_t)(((uint64_t)tx_stmph << 32) | tx_stmpl);
	timestamp->tv_nsec = 0;

	return 0;
}
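/*
 * Usage note for both timestamp readers: the *_VALID bit gates a latched
 * sample, so after igb_timesync_enable() callers poll
 * igb_timesync_read_rx_timestamp() / igb_timesync_read_tx_timestamp() and
 * treat -EINVAL as "no new timestamp yet". The raw 64-bit counter value is
 * returned in tv_sec with tv_nsec left at zero; conversion to wall-clock
 * units is left to the application.
 */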
static struct rte_driver pmd_igb_drv = {
	.type = PMD_PDEV,
	.init = rte_igb_pmd_init,
};

static struct rte_driver pmd_igbvf_drv = {
	.type = PMD_PDEV,
	.init = rte_igbvf_pmd_init,
};

PMD_REGISTER_DRIVER(pmd_igb_drv);
PMD_REGISTER_DRIVER(pmd_igbvf_drv);