/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_malloc.h>

#include "e1000_logs.h"
#include "e1000/e1000_api.h"
#include "e1000_ethdev.h"
static int eth_igb_configure(struct rte_eth_dev *dev);
static int eth_igb_start(struct rte_eth_dev *dev);
static void eth_igb_stop(struct rte_eth_dev *dev);
static void eth_igb_close(struct rte_eth_dev *dev);
static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
static void eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_igb_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static void eth_igb_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *rte_stats);
static void eth_igb_stats_reset(struct rte_eth_dev *dev);
static void eth_igb_infos_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);
static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
static int eth_igb_interrupt_action(struct rte_eth_dev *dev);
static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
							void *param);
static int igb_hardware_init(struct e1000_hw *hw);
static void igb_hw_control_acquire(struct e1000_hw *hw);
static void igb_hw_control_release(struct e1000_hw *hw);
static void igb_init_manageability(struct e1000_hw *hw);
static void igb_release_manageability(struct e1000_hw *hw);

static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static void eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
static void eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int eth_igb_led_on(struct rte_eth_dev *dev);
static int eth_igb_led_off(struct rte_eth_dev *dev);

static void igb_intr_disable(struct e1000_hw *hw);
static int igb_get_rx_buffer_size(struct e1000_hw *hw);
static void eth_igb_rar_set(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr,
		uint32_t index, uint32_t pool);
static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);

static void igbvf_intr_disable(struct e1000_hw *hw);
static int igbvf_dev_configure(struct rte_eth_dev *dev);
static int igbvf_dev_start(struct rte_eth_dev *dev);
static void igbvf_dev_stop(struct rte_eth_dev *dev);
static void igbvf_dev_close(struct rte_eth_dev *dev);
static int eth_igbvf_link_update(struct e1000_hw *hw);
static void eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats);
static void eth_igbvf_stats_reset(struct rte_eth_dev *dev);
static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static int eth_igb_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta *reta_conf);
static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta *reta_conf);

static int eth_igb_add_syn_filter(struct rte_eth_dev *dev,
		struct rte_syn_filter *filter, uint16_t rx_queue);
static int eth_igb_remove_syn_filter(struct rte_eth_dev *dev);
static int eth_igb_get_syn_filter(struct rte_eth_dev *dev,
		struct rte_syn_filter *filter, uint16_t *rx_queue);
static int eth_igb_add_ethertype_filter(struct rte_eth_dev *dev,
		uint16_t index,
		struct rte_ethertype_filter *filter, uint16_t rx_queue);
static int eth_igb_remove_ethertype_filter(struct rte_eth_dev *dev,
		uint16_t index);
static int eth_igb_get_ethertype_filter(struct rte_eth_dev *dev,
		uint16_t index,
		struct rte_ethertype_filter *filter, uint16_t *rx_queue);
static int eth_igb_add_2tuple_filter(struct rte_eth_dev *dev,
		uint16_t index,
		struct rte_2tuple_filter *filter, uint16_t rx_queue);
static int eth_igb_remove_2tuple_filter(struct rte_eth_dev *dev,
		uint16_t index);
static int eth_igb_get_2tuple_filter(struct rte_eth_dev *dev,
		uint16_t index,
		struct rte_2tuple_filter *filter, uint16_t *rx_queue);
static int eth_igb_add_flex_filter(struct rte_eth_dev *dev,
		uint16_t index,
		struct rte_flex_filter *filter, uint16_t rx_queue);
static int eth_igb_remove_flex_filter(struct rte_eth_dev *dev,
		uint16_t index);
static int eth_igb_get_flex_filter(struct rte_eth_dev *dev,
		uint16_t index,
		struct rte_flex_filter *filter, uint16_t *rx_queue);
static int eth_igb_add_5tuple_filter(struct rte_eth_dev *dev,
		uint16_t index,
		struct rte_5tuple_filter *filter, uint16_t rx_queue);
static int eth_igb_remove_5tuple_filter(struct rte_eth_dev *dev,
		uint16_t index);
static int eth_igb_get_5tuple_filter(struct rte_eth_dev *dev,
		uint16_t index,
		struct rte_5tuple_filter *filter, uint16_t *rx_queue);
/*
 * Define VF Stats MACRO for Non "cleared on read" register
 */
#define UPDATE_VF_STAT(reg, last, cur)            \
{                                                 \
	u32 latest = E1000_READ_REG(hw, reg);     \
	cur += latest - last;                     \
	last = latest;                            \
}
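/*
 * Note: the delta (latest - last) is computed in u32, so the running
 * total stays correct across 32-bit hardware counter wrap-around, as
 * long as stats are polled more often than one full wrap period.
 */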
#define IGB_FC_PAUSE_TIME 0x0680
#define IGB_LINK_UPDATE_CHECK_TIMEOUT  90  /* 9s */
#define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */

#define IGBVF_PMD_NAME "rte_igbvf_pmd" /* PMD name */

static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;
/*
 * The set of PCI devices this driver supports
 */
static struct rte_pci_id pci_id_igb_map[] = {

#define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

	{.device_id = 0},
};

/*
 * The set of PCI devices this driver supports (for 82576&I350 VF)
 */
static struct rte_pci_id pci_id_igbvf_map[] = {

#define RTE_PCI_DEV_ID_DECL_IGBVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

	{.device_id = 0},
};
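/*
 * Each RTE_PCI_DEV_ID_DECL_* macro above is expanded once for every
 * matching vendor/device pair listed in rte_pci_dev_ids.h, so including
 * that header between the #define and the table's closing brace emits one
 * rte_pci_id entry per supported NIC; the zeroed entry terminates the list.
 */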
static struct eth_dev_ops eth_igb_ops = {
	.dev_configure        = eth_igb_configure,
	.dev_start            = eth_igb_start,
	.dev_stop             = eth_igb_stop,
	.dev_close            = eth_igb_close,
	.promiscuous_enable   = eth_igb_promiscuous_enable,
	.promiscuous_disable  = eth_igb_promiscuous_disable,
	.allmulticast_enable  = eth_igb_allmulticast_enable,
	.allmulticast_disable = eth_igb_allmulticast_disable,
	.link_update          = eth_igb_link_update,
	.stats_get            = eth_igb_stats_get,
	.stats_reset          = eth_igb_stats_reset,
	.dev_infos_get        = eth_igb_infos_get,
	.vlan_filter_set      = eth_igb_vlan_filter_set,
	.vlan_tpid_set        = eth_igb_vlan_tpid_set,
	.vlan_offload_set     = eth_igb_vlan_offload_set,
	.rx_queue_setup       = eth_igb_rx_queue_setup,
	.rx_queue_release     = eth_igb_rx_queue_release,
	.rx_queue_count       = eth_igb_rx_queue_count,
	.rx_descriptor_done   = eth_igb_rx_descriptor_done,
	.tx_queue_setup       = eth_igb_tx_queue_setup,
	.tx_queue_release     = eth_igb_tx_queue_release,
	.dev_led_on           = eth_igb_led_on,
	.dev_led_off          = eth_igb_led_off,
	.flow_ctrl_get        = eth_igb_flow_ctrl_get,
	.flow_ctrl_set        = eth_igb_flow_ctrl_set,
	.mac_addr_add         = eth_igb_rar_set,
	.mac_addr_remove      = eth_igb_rar_clear,
	.reta_update          = eth_igb_rss_reta_update,
	.reta_query           = eth_igb_rss_reta_query,
	.rss_hash_update      = eth_igb_rss_hash_update,
	.rss_hash_conf_get    = eth_igb_rss_hash_conf_get,
	.add_syn_filter          = eth_igb_add_syn_filter,
	.remove_syn_filter       = eth_igb_remove_syn_filter,
	.get_syn_filter          = eth_igb_get_syn_filter,
	.add_ethertype_filter    = eth_igb_add_ethertype_filter,
	.remove_ethertype_filter = eth_igb_remove_ethertype_filter,
	.get_ethertype_filter    = eth_igb_get_ethertype_filter,
	.add_2tuple_filter       = eth_igb_add_2tuple_filter,
	.remove_2tuple_filter    = eth_igb_remove_2tuple_filter,
	.get_2tuple_filter       = eth_igb_get_2tuple_filter,
	.add_flex_filter         = eth_igb_add_flex_filter,
	.remove_flex_filter      = eth_igb_remove_flex_filter,
	.get_flex_filter         = eth_igb_get_flex_filter,
	.add_5tuple_filter       = eth_igb_add_5tuple_filter,
	.remove_5tuple_filter    = eth_igb_remove_5tuple_filter,
	.get_5tuple_filter       = eth_igb_get_5tuple_filter,
};
/*
 * dev_ops for virtual function, bare necessities for basic vf
 * operation have been implemented
 */
static struct eth_dev_ops igbvf_eth_dev_ops = {
	.dev_configure        = igbvf_dev_configure,
	.dev_start            = igbvf_dev_start,
	.dev_stop             = igbvf_dev_stop,
	.dev_close            = igbvf_dev_close,
	.link_update          = eth_igb_link_update,
	.stats_get            = eth_igbvf_stats_get,
	.stats_reset          = eth_igbvf_stats_reset,
	.vlan_filter_set      = igbvf_vlan_filter_set,
	.dev_infos_get        = eth_igb_infos_get,
	.rx_queue_setup       = eth_igb_rx_queue_setup,
	.rx_queue_release     = eth_igb_rx_queue_release,
	.tx_queue_setup       = eth_igb_tx_queue_setup,
	.tx_queue_release     = eth_igb_tx_queue_release,
};
/**
 * Atomically reads the link status information from global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to read from.
 * @param link
 *   - Pointer to the buffer to be saved with the link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_igb_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

/**
 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to write to.
 * @param link
 *   - Pointer to the link status to be written.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
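/*
 * Both helpers rely on struct rte_eth_link packing into a single 64-bit
 * word, so one rte_atomic64_cmpset() copies speed/duplex/status together
 * and readers can never observe a torn update.
 */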
static void
igb_intr_enable(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
	E1000_WRITE_FLUSH(hw);
}

static void
igb_intr_disable(struct e1000_hw *hw)
{
	E1000_WRITE_REG(hw, E1000_IMC, ~0);
	E1000_WRITE_FLUSH(hw);
}
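/*
 * IMS (Interrupt Mask Set) enables the causes recorded in intr->mask;
 * writing all-ones to IMC (Interrupt Mask Clear) masks every cause at once.
 */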
static inline int32_t
igb_pf_reset_hw(struct e1000_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = e1000_reset_hw(hw);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	return status;
}
static void
igb_identify_hardware(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw->vendor_id = dev->pci_dev->id.vendor_id;
	hw->device_id = dev->pci_dev->id.device_id;
	hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;

	e1000_set_mac_type(hw);

	/* need to check if it is a vf device below */
}
static int
igb_reset_swfw_lock(struct e1000_hw *hw)
{
	int ret_val;

	/*
	 * Do mac ops initialization manually here, since we will need
	 * some function pointers set by this call.
	 */
	ret_val = e1000_init_mac_params(hw);
	if (ret_val)
		return ret_val;

	/*
	 * SMBI lock should not fail in this early stage. If this is the case,
	 * it is due to an improper exit of the application.
	 * So force the release of the faulty lock.
	 */
	if (e1000_get_hw_semaphore_generic(hw) < 0) {
		DEBUGOUT("SMBI lock released");
	}
	e1000_put_hw_semaphore_generic(hw);

	if (hw->mac.ops.acquire_swfw_sync != NULL) {
		u16 mask;

		/*
		 * Phy lock should not fail in this early stage. If this is the case,
		 * it is due to an improper exit of the application.
		 * So force the release of the faulty lock.
		 */
		mask = E1000_SWFW_PHY0_SM << hw->bus.func;
		if (hw->bus.func > E1000_FUNC_1)
			mask <<= 2;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			DEBUGOUT1("SWFW phy%d lock released", hw->bus.func);
		}
		hw->mac.ops.release_swfw_sync(hw, mask);

		/*
		 * This one is more tricky since it is common to all ports; but
		 * the swfw_sync retries last long enough (1s) to be almost sure
		 * that if the lock can not be taken it is due to an improper
		 * lock of the semaphore.
		 */
		mask = E1000_SWFW_EEP_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			DEBUGOUT("SWFW common locks released");
		}
		hw->mac.ops.release_swfw_sync(hw, mask);
	}

	return E1000_SUCCESS;
}
static int
eth_igb_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
		struct rte_eth_dev *eth_dev)
{
	int error = 0;
	struct rte_pci_device *pci_dev;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
	uint32_t ctrl_ext;

	pci_dev = eth_dev->pci_dev;
	eth_dev->dev_ops = &eth_igb_ops;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
		return 0;
	}

	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	igb_identify_hardware(eth_dev);
	if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	e1000_get_bus_info(hw);

	/* Reset any pending lock */
	if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	/* Finish initialization */
	if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	hw->mac.autoneg = 1;
	hw->phy.autoneg_wait_to_complete = 0;
	hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = 0; /* AUTO_ALL_MODES */
		hw->phy.disable_polarity_correction = 0;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	/*
	 * Start from a known state, this is important in reading the nvm
	 * and mac from that.
	 */
	igb_pf_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time its a real issue.
		 */
		if (e1000_validate_nvm_checksum(hw) < 0) {
			PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
			error = -EIO;
			goto err_late;
		}
	}

	/* Read the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(hw) != 0) {
		PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
		error = -EIO;
		goto err_late;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("e1000",
		ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
						"store MAC addresses",
				ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		error = -ENOMEM;
		goto err_late;
	}

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)hw->mac.addr,
			&eth_dev->data->mac_addrs[0]);

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* Now initialize the hardware */
	if (igb_hardware_init(hw) != 0) {
		PMD_INIT_LOG(ERR, "Hardware initialization failed");
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		error = -ENODEV;
		goto err_late;
	}
	hw->mac.get_link_status = 1;

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(hw) < 0) {
		PMD_INIT_LOG(ERR, "PHY reset is blocked due to "
					"SOL/IDER session");
	}

	/* initialize PF if max_vfs not zero */
	igb_pf_host_init(eth_dev);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x\n",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(&(pci_dev->intr_handle),
		eth_igb_interrupt_handler, (void *)eth_dev);

	/* enable uio intr after callback register */
	rte_intr_enable(&(pci_dev->intr_handle));

	/* enable support intr */
	igb_intr_enable(eth_dev);

	return 0;

err_late:
	igb_hw_control_release(hw);

	return (error);
}
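/*
 * Note that the err_late path above clears CTRL_EXT:DRV_LOAD via
 * igb_hw_control_release(), so the firmware regains (or simply keeps)
 * control of the NIC and manageability features stay functional even
 * though the port was never exposed to the application.
 */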
/*
 * Virtual Function device init
 */
static int
eth_igbvf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
		struct rte_eth_dev *eth_dev)
{
	int diag;
	struct rte_pci_device *pci_dev;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	PMD_INIT_LOG(DEBUG, "eth_igbvf_dev_init");

	eth_dev->dev_ops = &igbvf_eth_dev_ops;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
		return 0;
	}

	pci_dev = eth_dev->pci_dev;

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	/* Initialize the shared code */
	diag = e1000_setup_init_funcs(hw, TRUE);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d",
			diag);
		return -EIO;
	}

	/* init_mailbox_params */
	hw->mbx.ops.init_params(hw);

	/* Disable the interrupts for VF */
	igbvf_intr_disable(hw);

	/* Reset the VF MAC; the PF will reassign its resources */
	diag = hw->mac.ops.reset_hw(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN *
		hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %d bytes needed to store MAC "
			"addresses",
			ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	PMD_INIT_LOG(DEBUG, "\nport %d vendorID=0x%x deviceID=0x%x\n",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);

	return 0;
}
static struct eth_driver rte_igb_pmd = {
	{
		.name = "rte_igb_pmd",
		.id_table = pci_id_igb_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	},
	.eth_dev_init = eth_igb_dev_init,
	.dev_private_size = sizeof(struct e1000_adapter),
};

/*
 * virtual function driver struct
 */
static struct eth_driver rte_igbvf_pmd = {
	{
		.name = "rte_igbvf_pmd",
		.id_table = pci_id_igbvf_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	},
	.eth_dev_init = eth_igbvf_dev_init,
	.dev_private_size = sizeof(struct e1000_adapter),
};

static int
rte_igb_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
{
	rte_eth_driver_register(&rte_igb_pmd);
	return (0);
}
static void
igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* RCTL: enable VLAN filter since VMDq always uses VLAN filter */
	uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}
/*
 * VF Driver initialization routine.
 * Invoked once at EAL init time.
 * Registers itself as the [Virtual Poll Mode] Driver of PCI IGB devices.
 */
static int
rte_igbvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
{
	DEBUGFUNC("rte_igbvf_pmd_init");

	rte_eth_driver_register(&rte_igbvf_pmd);
	return (0);
}
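/*
 * Note: in this DPDK generation the two *_pmd_init() hooks above are
 * expected to run exactly once during rte_eal_init(), through the PMD
 * driver-registration machinery rather than by direct application calls;
 * once registered, the EAL PCI probe invokes eth_igb(vf)_dev_init() for
 * every device matched by the id tables.
 */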
static int
eth_igb_configure(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	PMD_INIT_LOG(DEBUG, ">>");
	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
	PMD_INIT_LOG(DEBUG, "<<");

	return (0);
}
static int
eth_igb_start(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret, i, mask;
	uint32_t ctrl_ext;

	PMD_INIT_LOG(DEBUG, ">>");

	/* Power up the phy. Needed to make the link go Up */
	e1000_power_up_phy(hw);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 */
	if (hw->mac.type == e1000_82575) {
		uint32_t pba;

		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		E1000_WRITE_REG(hw, E1000_PBA, pba);
	}

	/* Put the address into the Receive Address Array */
	e1000_rar_set(hw, hw->mac.addr, 0);

	/* Initialize the hardware */
	if (igb_hardware_init(hw)) {
		PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
		return (-EIO);
	}

	E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	/* configure PF module if SRIOV enabled */
	igb_pf_host_configure(dev);

	/* Configure for OS presence */
	igb_init_manageability(hw);

	eth_igb_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	ret = eth_igb_rx_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		igb_dev_clear_queues(dev);
		return ret;
	}

	e1000_clear_hw_cntrs_base_generic(hw);

	/*
	 * VLAN Offload Settings
	 */
	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
			ETH_VLAN_EXTEND_MASK;
	eth_igb_vlan_offload_set(dev, mask);

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
		/* Enable VLAN filter since VMDq always uses VLAN filter */
		igb_vmdq_vlan_hw_filter_enable(dev);
	}

	/*
	 * Configure the Interrupt Moderation register (EITR) with the maximum
	 * possible value (0xFFFF) to minimize "System Partial Write" issued by
	 * spurious [DMA] memory updates of RX and TX ring descriptors.
	 *
	 * With an EITR granularity of 2 microseconds in the 82576, only about
	 * 7-8 spurious memory updates per second should be expected, one every
	 * (65535 * 2) / 1,000,000 ~= 0.131 second.
	 *
	 * Because interrupts are not used at all, the MSI-X is not activated
	 * and interrupt moderation is controlled by EITR[0].
	 *
	 * Note that having [almost] disabled memory updates of RX and TX ring
	 * descriptors through the Interrupt Moderation mechanism, memory
	 * updates of ring descriptors are now moderated by the configurable
	 * value of Write-Back Threshold registers.
	 */
	if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
		(hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210)) {
		uint32_t ivar;

		/* Enable all RX & TX queues in the IVAR registers */
		ivar = (uint32_t) ((E1000_IVAR_VALID << 16) | E1000_IVAR_VALID);
		for (i = 0; i < 8; i++)
			E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, i, ivar);

		/* Configure EITR with the maximum possible value (0xFFFF) */
		E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
	}

	/* Setup link speed and duplex */
	switch (dev->data->dev_conf.link_speed) {
	case ETH_LINK_SPEED_AUTONEG:
		if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
			hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
		else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
			hw->phy.autoneg_advertised = E1000_ALL_HALF_DUPLEX;
		else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
			hw->phy.autoneg_advertised = E1000_ALL_FULL_DUPLEX;
		else
			goto error_invalid_config;
		break;
	case ETH_LINK_SPEED_10:
		if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
			hw->phy.autoneg_advertised = E1000_ALL_10_SPEED;
		else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
			hw->phy.autoneg_advertised = ADVERTISE_10_HALF;
		else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
			hw->phy.autoneg_advertised = ADVERTISE_10_FULL;
		else
			goto error_invalid_config;
		break;
	case ETH_LINK_SPEED_100:
		if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
			hw->phy.autoneg_advertised = E1000_ALL_100_SPEED;
		else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
			hw->phy.autoneg_advertised = ADVERTISE_100_HALF;
		else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
			hw->phy.autoneg_advertised = ADVERTISE_100_FULL;
		else
			goto error_invalid_config;
		break;
	case ETH_LINK_SPEED_1000:
		if ((dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX) ||
				(dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX))
			hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
		else
			goto error_invalid_config;
		break;
	case ETH_LINK_SPEED_10000:
	default:
		goto error_invalid_config;
	}
	e1000_setup_link(hw);

	/* check if lsc interrupt feature is enabled */
	if (dev->data->dev_conf.intr_conf.lsc != 0)
		ret = eth_igb_lsc_interrupt_setup(dev);

	/* resume enabled intr since hw reset */
	igb_intr_enable(dev);

	PMD_INIT_LOG(DEBUG, "<<");

	return (0);

error_invalid_config:
	PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u\n",
			dev->data->dev_conf.link_speed,
			dev->data->dev_conf.link_duplex, dev->data->port_id);
	igb_dev_clear_queues(dev);
	return (-EINVAL);
}
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 *
 **********************************************************************/
static void
eth_igb_stop(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link;

	igb_intr_disable(hw);
	igb_pf_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	/* Set bit for Go Link disconnect */
	if (hw->mac.type >= e1000_82580) {
		uint32_t phpm_reg;

		phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
		phpm_reg |= E1000_82580_PM_GO_LINKD;
		E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
	}

	/* Power down the phy. Needed to make the link go Down */
	e1000_power_down_phy(hw);

	igb_dev_clear_queues(dev);

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_igb_dev_atomic_write_link_status(dev, &link);
}
static void
eth_igb_close(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link;

	eth_igb_stop(dev);
	e1000_phy_hw_reset(hw);
	igb_release_manageability(hw);
	igb_hw_control_release(hw);

	/* Clear bit for Go Link disconnect */
	if (hw->mac.type >= e1000_82580) {
		uint32_t phpm_reg;

		phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
		phpm_reg &= ~E1000_82580_PM_GO_LINKD;
		E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
	}

	igb_dev_clear_queues(dev);

	memset(&link, 0, sizeof(link));
	rte_igb_dev_atomic_write_link_status(dev, &link);
}
static int
igb_get_rx_buffer_size(struct e1000_hw *hw)
{
	uint32_t rx_buf_size;

	if (hw->mac.type == e1000_82576) {
		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10;
	} else if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i350) {
		/* PBS needs to be translated according to a lookup table */
		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf);
		rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size);
		rx_buf_size = (rx_buf_size << 10);
	} else if (hw->mac.type == e1000_i210) {
		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10;
	} else {
		rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10;
	}

	return rx_buf_size;
}
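/*
 * The RXPBS/PBA fields report the Rx packet buffer size in kilobytes;
 * the << 10 shifts convert that figure to bytes for the flow-control
 * watermark calculations in igb_hardware_init().
 */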
/*********************************************************************
 *
 *  Initialize the hardware
 *
 **********************************************************************/
static int
igb_hardware_init(struct e1000_hw *hw)
{
	uint32_t rx_buf_size;
	int diag;

	/* Let the firmware know the OS is in control */
	igb_hw_control_acquire(hw);

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two standard size (1518)
	 *   frames to be received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	rx_buf_size = igb_get_rx_buffer_size(hw);

	hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2);
	hw->fc.low_water = hw->fc.high_water - 1500;
	hw->fc.pause_time = IGB_FC_PAUSE_TIME;
	hw->fc.send_xon = 1;

	/* Set Flow control, use the tunable location if sane */
	if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4))
		hw->fc.requested_mode = igb_fc_setting;
	else
		hw->fc.requested_mode = e1000_fc_none;

	/* Issue a global reset */
	igb_pf_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	diag = e1000_init_hw(hw);
	if (diag < 0)
		return (diag);

	E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
	e1000_get_phy_info(hw);
	e1000_check_for_link(hw);

	return (0);
}
/* This function is based on igb_update_stats_counters() in igb/if_igb.c */
static void
eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_hw_stats *stats =
			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	int pause_frames;

	if (hw->phy.media_type == e1000_media_type_copper ||
	    (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		stats->symerrs +=
		    E1000_READ_REG(hw, E1000_SYMERRS);
		stats->sec += E1000_READ_REG(hw, E1000_SEC);
	}

	stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
	stats->mpc += E1000_READ_REG(hw, E1000_MPC);
	stats->scc += E1000_READ_REG(hw, E1000_SCC);
	stats->ecol += E1000_READ_REG(hw, E1000_ECOL);

	stats->mcc += E1000_READ_REG(hw, E1000_MCC);
	stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
	stats->colc += E1000_READ_REG(hw, E1000_COLC);
	stats->dc += E1000_READ_REG(hw, E1000_DC);
	stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
	stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
	stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
	/*
	 * For watchdog management we need to know if we have been
	 * paused during the last interval, so capture that here.
	 */
	pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
	stats->xoffrxc += pause_frames;
	stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
	stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
	stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
	stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
	stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
	stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
	stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
	stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
	stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
	stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
	stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
	stats->gptc += E1000_READ_REG(hw, E1000_GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
	stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
	stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
	stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);

	stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
	stats->ruc += E1000_READ_REG(hw, E1000_RUC);
	stats->rfc += E1000_READ_REG(hw, E1000_RFC);
	stats->roc += E1000_READ_REG(hw, E1000_ROC);
	stats->rjc += E1000_READ_REG(hw, E1000_RJC);

	stats->tor += E1000_READ_REG(hw, E1000_TORH);
	stats->tot += E1000_READ_REG(hw, E1000_TOTH);

	stats->tpr += E1000_READ_REG(hw, E1000_TPR);
	stats->tpt += E1000_READ_REG(hw, E1000_TPT);
	stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
	stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
	stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
	stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
	stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
	stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
	stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
	stats->bptc += E1000_READ_REG(hw, E1000_BPTC);

	/* Interrupt Counts */

	stats->iac += E1000_READ_REG(hw, E1000_IAC);
	stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
	stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
	stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
	stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
	stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
	stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
	stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
	stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);

	/* Host to Card Statistics */

	stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
	stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
	stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
	stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
	stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
	stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
	stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
	stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL);
	stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32);
	stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL);
	stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32);
	stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
	stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
	stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);

	stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
	stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
	stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
	stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
	stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
	stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);

	if (rte_stats == NULL)
		return;

	/* Rx Errors */
	rte_stats->ibadcrc = stats->crcerrs;
	rte_stats->ibadlen = stats->rlec + stats->ruc + stats->roc;
	rte_stats->imissed = stats->mpc;
	rte_stats->ierrors = rte_stats->ibadcrc +
	                     rte_stats->ibadlen +
	                     rte_stats->imissed +
	                     stats->rxerrc + stats->algnerrc + stats->cexterr;

	/* Tx Errors */
	rte_stats->oerrors = stats->ecol + stats->latecol;

	/* XON/XOFF pause frames */
	rte_stats->tx_pause_xon  = stats->xontxc;
	rte_stats->rx_pause_xon  = stats->xonrxc;
	rte_stats->tx_pause_xoff = stats->xofftxc;
	rte_stats->rx_pause_xoff = stats->xoffrxc;

	rte_stats->ipackets = stats->gprc;
	rte_stats->opackets = stats->gptc;
	rte_stats->ibytes   = stats->gorc;
	rte_stats->obytes   = stats->gotc;
}
static void
eth_igb_stats_reset(struct rte_eth_dev *dev)
{
	struct e1000_hw_stats *hw_stats =
			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* HW registers are cleared on read */
	eth_igb_stats_get(dev, NULL);

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));
}
static void
eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*)
			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* Good Rx packets, include VF loopback */
	UPDATE_VF_STAT(E1000_VFGPRC,
	    hw_stats->last_gprc, hw_stats->gprc);

	/* Good Rx octets, include VF loopback */
	UPDATE_VF_STAT(E1000_VFGORC,
	    hw_stats->last_gorc, hw_stats->gorc);

	/* Good Tx packets, include VF loopback */
	UPDATE_VF_STAT(E1000_VFGPTC,
	    hw_stats->last_gptc, hw_stats->gptc);

	/* Good Tx octets, include VF loopback */
	UPDATE_VF_STAT(E1000_VFGOTC,
	    hw_stats->last_gotc, hw_stats->gotc);

	/* Rx Multicast packets */
	UPDATE_VF_STAT(E1000_VFMPRC,
	    hw_stats->last_mprc, hw_stats->mprc);

	/* Good Rx loopback packets */
	UPDATE_VF_STAT(E1000_VFGPRLBC,
	    hw_stats->last_gprlbc, hw_stats->gprlbc);

	/* Good Rx loopback octets */
	UPDATE_VF_STAT(E1000_VFGORLBC,
	    hw_stats->last_gorlbc, hw_stats->gorlbc);

	/* Good Tx loopback packets */
	UPDATE_VF_STAT(E1000_VFGPTLBC,
	    hw_stats->last_gptlbc, hw_stats->gptlbc);

	/* Good Tx loopback octets */
	UPDATE_VF_STAT(E1000_VFGOTLBC,
	    hw_stats->last_gotlbc, hw_stats->gotlbc);

	if (rte_stats == NULL)
		return;

	memset(rte_stats, 0, sizeof(*rte_stats));
	rte_stats->ipackets = hw_stats->gprc;
	rte_stats->ibytes = hw_stats->gorc;
	rte_stats->opackets = hw_stats->gptc;
	rte_stats->obytes = hw_stats->gotc;
	rte_stats->imcasts = hw_stats->mprc;
	rte_stats->ilbpackets = hw_stats->gprlbc;
	rte_stats->ilbbytes = hw_stats->gorlbc;
	rte_stats->olbpackets = hw_stats->gptlbc;
	rte_stats->olbbytes = hw_stats->gotlbc;
}
static void
eth_igbvf_stats_reset(struct rte_eth_dev *dev)
{
	struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*)
			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* Sync HW register to the last stats */
	eth_igbvf_stats_get(dev, NULL);

	/* reset HW current stats */
	memset(&hw_stats->gprc, 0, sizeof(*hw_stats) -
	       offsetof(struct e1000_vf_stats, gprc));
}
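/*
 * Only the accumulated totals (gprc and the fields that follow it) are
 * zeroed; the last_* register snapshots stored earlier in the struct are
 * preserved, so the next UPDATE_VF_STAT() delta is still computed against
 * the correct hardware baseline refreshed by the stats_get() call above.
 */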
static void
eth_igb_infos_get(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
	dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM;

	switch (hw->mac.type) {
	case e1000_82575:
		dev_info->max_rx_queues = 4;
		dev_info->max_tx_queues = 4;
		dev_info->max_vmdq_pools = 0;
		break;

	case e1000_82576:
		dev_info->max_rx_queues = 16;
		dev_info->max_tx_queues = 16;
		dev_info->max_vmdq_pools = ETH_8_POOLS;
		break;

	case e1000_82580:
		dev_info->max_rx_queues = 8;
		dev_info->max_tx_queues = 8;
		dev_info->max_vmdq_pools = ETH_8_POOLS;
		break;

	case e1000_i350:
		dev_info->max_rx_queues = 8;
		dev_info->max_tx_queues = 8;
		dev_info->max_vmdq_pools = ETH_8_POOLS;
		break;

	case e1000_i354:
		dev_info->max_rx_queues = 8;
		dev_info->max_tx_queues = 8;
		break;

	case e1000_i210:
		dev_info->max_rx_queues = 4;
		dev_info->max_tx_queues = 4;
		dev_info->max_vmdq_pools = 0;
		break;

	case e1000_vfadapt:
		dev_info->max_rx_queues = 2;
		dev_info->max_tx_queues = 2;
		dev_info->max_vmdq_pools = 0;
		break;

	case e1000_vfadapt_i350:
		dev_info->max_rx_queues = 1;
		dev_info->max_tx_queues = 1;
		dev_info->max_vmdq_pools = 0;
		break;

	default:
		/* Should not happen */
		dev_info->max_rx_queues = 0;
		dev_info->max_tx_queues = 0;
		dev_info->max_vmdq_pools = 0;
	}
}
/* return 0 means link status changed, -1 means not changed */
static int
eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link, old;
	int link_check, count;

	link_check = 0;
	hw->mac.get_link_status = 1;

	/* possible wait-to-complete in up to 9 seconds */
	for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count++) {
		/* Read the real link status */
		switch (hw->phy.media_type) {
		case e1000_media_type_copper:
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			break;

		case e1000_media_type_fiber:
			e1000_check_for_link(hw);
			link_check = (E1000_READ_REG(hw, E1000_STATUS) &
				      E1000_STATUS_LU);
			break;

		case e1000_media_type_internal_serdes:
			e1000_check_for_link(hw);
			link_check = hw->mac.serdes_has_link;
			break;

		/* VF device is type_unknown */
		case e1000_media_type_unknown:
			eth_igbvf_link_update(hw);
			link_check = !hw->mac.get_link_status;
			break;

		default:
			break;
		}
		if (link_check || wait_to_complete == 0)
			break;
		rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL);
	}
	memset(&link, 0, sizeof(link));
	rte_igb_dev_atomic_read_link_status(dev, &link);
	old = link;

	/* Now we check if a transition has happened */
	if (link_check) {
		hw->mac.ops.get_link_up_info(hw, &link.link_speed,
					  &link.link_duplex);
		link.link_status = 1;
	} else if (!link_check) {
		link.link_speed = 0;
		link.link_duplex = 0;
		link.link_status = 0;
	}
	rte_igb_dev_atomic_write_link_status(dev, &link);

	/* not changed */
	if (old.link_status == link.link_status)
		return -1;

	/* changed */
	return 0;
}
/*
 * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means
 * that the driver is loaded.
 */
static void
igb_hw_control_acquire(struct e1000_hw *hw)
{
	uint32_t ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/*
 * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void
igb_hw_control_release(struct e1000_hw *hw)
{
	uint32_t ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	E1000_WRITE_REG(hw, E1000_CTRL_EXT,
			ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}
/*
 * Bit of a misnomer, what this really means is
 * to enable OS management of the system... aka
 * to disable special hardware management features.
 */
static void
igb_init_manageability(struct e1000_hw *hw)
{
	if (e1000_enable_mng_pass_thru(hw)) {
		uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
		uint32_t manc = E1000_READ_REG(hw, E1000_MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		/* enable receiving management packets to the host */
		manc |= E1000_MANC_EN_MNG2HOST;
		manc2h |= 1 << 5;  /* Mng Port 623 */
		manc2h |= 1 << 6;  /* Mng Port 664 */
		E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
		E1000_WRITE_REG(hw, E1000_MANC, manc);
	}
}

static void
igb_release_manageability(struct e1000_hw *hw)
{
	if (e1000_enable_mng_pass_thru(hw)) {
		uint32_t manc = E1000_READ_REG(hw, E1000_MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;
		manc &= ~E1000_MANC_EN_MNG2HOST;

		E1000_WRITE_REG(hw, E1000_MANC, manc);
	}
}
static void
eth_igb_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

static void
eth_igb_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl &= (~E1000_RCTL_UPE);
	if (dev->data->all_multicast == 1)
		rctl |= E1000_RCTL_MPE;
	else
		rctl &= (~E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

static void
eth_igb_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl |= E1000_RCTL_MPE;
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

static void
eth_igb_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	if (dev->data->promiscuous == 1)
		return; /* must remain in all_multicast mode */
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl &= (~E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}
static int
eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
			      E1000_VFTA_ENTRY_MASK);
	vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
	vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}
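/*
 * The VFTA is a 4096-bit table stored as 128 32-bit words: vid_idx selects
 * the word (upper bits of the VLAN id) and vid_bit the bit inside it
 * (lower 5 bits), so each VLAN id maps to exactly one filter bit.
 */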
static void
eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg = ETHER_TYPE_VLAN;

	reg |= (tpid << 16);
	E1000_WRITE_REG(hw, E1000_VET, reg);
}
static void
igb_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* Filter Table Disable */
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_CFIEN;
	reg &= ~E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);
}

static void
igb_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t reg;
	int i;

	/* Filter Table Enable, CFI not used for packet acceptance */
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_CFIEN;
	reg |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);

	/* restore VFTA table */
	for (i = 0; i < IGB_VFTA_SIZE; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
}

static void
igb_vlan_hw_strip_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* VLAN Mode Disable */
	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg &= ~E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);
}

static void
igb_vlan_hw_strip_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* VLAN Mode Enable */
	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg |= E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);
}

static void
igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* CTRL_EXT: Extended VLAN */
	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	reg &= ~E1000_CTRL_EXT_EXTEND_VLAN;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);

	/* Update maximum packet length */
	if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
		E1000_WRITE_REG(hw, E1000_RLPML,
			dev->data->dev_conf.rxmode.max_rx_pkt_len +
						VLAN_TAG_SIZE);
}

static void
igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* CTRL_EXT: Extended VLAN */
	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	reg |= E1000_CTRL_EXT_EXTEND_VLAN;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);

	/* Update maximum packet length */
	if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
		E1000_WRITE_REG(hw, E1000_RLPML,
			dev->data->dev_conf.rxmode.max_rx_pkt_len +
						2 * VLAN_TAG_SIZE);
}
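/*
 * With extended (double) VLAN enabled the hardware must accept frames
 * carrying two 4-byte tags, hence RLPML is raised by 2 * VLAN_TAG_SIZE
 * instead of a single tag's worth when jumbo frames are in use.
 */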
static void
eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	if (mask & ETH_VLAN_STRIP_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
			igb_vlan_hw_strip_enable(dev);
		else
			igb_vlan_hw_strip_disable(dev);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
			igb_vlan_hw_filter_enable(dev);
		else
			igb_vlan_hw_filter_disable(dev);
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
			igb_vlan_hw_extend_enable(dev);
		else
			igb_vlan_hw_extend_disable(dev);
	}
}
/*
 * It enables the interrupt mask and then enables the interrupt.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	intr->mask |= E1000_ICR_LSC;

	return 0;
}
/*
 * It reads ICR and gets interrupt causes, checks them and sets a bit flag
 * to update link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t icr;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	igb_intr_disable(hw);

	/* read-on-clear nic registers here */
	icr = E1000_READ_REG(hw, E1000_ICR);

	intr->flags = 0;
	if (icr & E1000_ICR_LSC) {
		intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
	}

	if (icr & E1000_ICR_VMMB)
		intr->flags |= E1000_FLAG_MAILBOX;

	return 0;
}
/*
 * It executes link_update after knowing an interrupt is present.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_igb_interrupt_action(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	uint32_t tctl, rctl;
	struct rte_eth_link link;
	int ret;

	if (intr->flags & E1000_FLAG_MAILBOX) {
		igb_pf_mbx_process(dev);
		intr->flags &= ~E1000_FLAG_MAILBOX;
	}

	igb_intr_enable(dev);
	rte_intr_enable(&(dev->pci_dev->intr_handle));

	if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
		intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;

		/* set get_link_status to check register later */
		hw->mac.get_link_status = 1;
		ret = eth_igb_link_update(dev, 0);

		/* check if link has changed */
		if (ret < 0)
			return 0;

		memset(&link, 0, sizeof(link));
		rte_igb_dev_atomic_read_link_status(dev, &link);
		if (link.link_status) {
			PMD_INIT_LOG(INFO,
				" Port %d: Link Up - speed %u Mbps - %s\n",
				dev->data->port_id, (unsigned)link.link_speed,
				link.link_duplex == ETH_LINK_FULL_DUPLEX ?
					"full-duplex" : "half-duplex");
		} else {
			PMD_INIT_LOG(INFO, " Port %d: Link Down\n",
						dev->data->port_id);
		}
		PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
					dev->pci_dev->addr.domain,
					dev->pci_dev->addr.bus,
					dev->pci_dev->addr.devid,
					dev->pci_dev->addr.function);
		tctl = E1000_READ_REG(hw, E1000_TCTL);
		rctl = E1000_READ_REG(hw, E1000_RCTL);
		if (link.link_status) {
			/* enable Tx/Rx */
			tctl |= E1000_TCTL_EN;
			rctl |= E1000_RCTL_EN;
		} else {
			/* disable Tx/Rx */
			tctl &= ~E1000_TCTL_EN;
			rctl &= ~E1000_RCTL_EN;
		}
		E1000_WRITE_REG(hw, E1000_TCTL, tctl);
		E1000_WRITE_REG(hw, E1000_RCTL, rctl);
		E1000_WRITE_FLUSH(hw);
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
	}

	return 0;
}
/*
 * Interrupt handler which shall be registered at first.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
eth_igb_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
							void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	eth_igb_interrupt_get_status(dev);
	eth_igb_interrupt_action(dev);
}
static int
eth_igb_led_on(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	return (e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
}

static int
eth_igb_led_off(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	return (e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
}
static int
eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct e1000_hw *hw;
	uint32_t ctrl;
	int tx_pause;
	int rx_pause;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	fc_conf->pause_time = hw->fc.pause_time;
	fc_conf->high_water = hw->fc.high_water;
	fc_conf->low_water = hw->fc.low_water;
	fc_conf->send_xon = hw->fc.send_xon;

	/*
	 * Return rx_pause and tx_pause status according to actual setting of
	 * the TFCE and RFCE bits in the CTRL register.
	 */
	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	if (ctrl & E1000_CTRL_TFCE)
		tx_pause = 1;
	else
		tx_pause = 0;

	if (ctrl & E1000_CTRL_RFCE)
		rx_pause = 1;
	else
		rx_pause = 0;

	if (rx_pause && tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

	return 0;
}
static int
eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct e1000_hw *hw;
	int err;
	enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
		e1000_fc_none,
		e1000_fc_rx_pause,
		e1000_fc_tx_pause,
		e1000_fc_full
	};
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	uint32_t rctl;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	rx_buf_size = igb_get_rx_buffer_size(hw);
	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size);

	/* At least reserve one Ethernet frame for watermark */
	max_high_water = rx_buf_size - ETHER_MAX_LEN;
	if ((fc_conf->high_water > max_high_water) ||
		(fc_conf->high_water < fc_conf->low_water)) {
		PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value \n");
		PMD_INIT_LOG(ERR, "high water must <= 0x%x \n", max_high_water);
		return (-EINVAL);
	}

	hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
	hw->fc.pause_time = fc_conf->pause_time;
	hw->fc.high_water = fc_conf->high_water;
	hw->fc.low_water = fc_conf->low_water;
	hw->fc.send_xon = fc_conf->send_xon;

	err = e1000_setup_link_generic(hw);
	if (err == E1000_SUCCESS) {

		/* check if we want to forward MAC frames - driver doesn't have native
		 * capability to do that, so we'll write the registers ourselves */

		rctl = E1000_READ_REG(hw, E1000_RCTL);

		/* set or clear MFLCN.PMCF bit depending on configuration */
		if (fc_conf->mac_ctrl_frame_fwd != 0)
			rctl |= E1000_RCTL_PMCF;
		else
			rctl &= ~E1000_RCTL_PMCF;

		E1000_WRITE_REG(hw, E1000_RCTL, rctl);
		E1000_WRITE_FLUSH(hw);

		return 0;
	}

	PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x \n", err);
	return (-EIO);
}
#define E1000_RAH_POOLSEL_SHIFT      (18)
static void
eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
		uint32_t index, uint32_t pool)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rah;

	e1000_rar_set(hw, mac_addr->addr_bytes, index);
	rah = E1000_READ_REG(hw, E1000_RAH(index));
	rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool));
	E1000_WRITE_REG(hw, E1000_RAH(index), rah);
}

static void
eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
{
	uint8_t addr[ETHER_ADDR_LEN];
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	memset(addr, 0, sizeof(addr));

	e1000_rar_set(hw, addr, index);
}
/*
 * Virtual Function operations
 */
static void
igbvf_intr_disable(struct e1000_hw *hw)
{
	PMD_INIT_LOG(DEBUG, "igbvf_intr_disable");

	/* Clear interrupt mask to stop interrupts from being generated */
	E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF);

	E1000_WRITE_FLUSH(hw);
}
static void
igbvf_stop_adapter(struct rte_eth_dev *dev)
{
	u32 reg_val;
	u16 i;
	struct rte_eth_dev_info dev_info;
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	memset(&dev_info, 0, sizeof(dev_info));
	eth_igb_infos_get(dev, &dev_info);

	/* Clear interrupt mask to stop interrupts from being generated */
	igbvf_intr_disable(hw);

	/* Clear any pending interrupts, flush previous writes */
	E1000_READ_REG(hw, E1000_EICR);

	/* Disable the transmit unit. Each queue must be disabled. */
	for (i = 0; i < dev_info.max_tx_queues; i++)
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), E1000_TXDCTL_SWFLSH);

	/* Disable the receive unit by stopping each queue */
	for (i = 0; i < dev_info.max_rx_queues; i++) {
		reg_val = E1000_READ_REG(hw, E1000_RXDCTL(i));
		reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), reg_val);
		while (E1000_READ_REG(hw, E1000_RXDCTL(i)) & E1000_RXDCTL_QUEUE_ENABLE)
			;
	}

	/* flush all queues disables */
	E1000_WRITE_FLUSH(hw);
	msec_delay(2);
}
static int eth_igbvf_link_update(struct e1000_hw *hw)
{
	struct e1000_mbx_info *mbx = &hw->mbx;
	struct e1000_mac_info *mac = &hw->mac;
	int ret_val = E1000_SUCCESS;

	PMD_INIT_LOG(DEBUG, "e1000_check_for_link_vf");

	/*
	 * We only want to run this if a reset has been asserted; in that
	 * case it could mean a link change, a device reset, or a virtual
	 * function reset.
	 */

	/* If we were hit with a reset or a timeout, drop the link */
	if (!e1000_check_for_rst(hw, 0) || !mbx->timeout)
		mac->get_link_status = TRUE;

	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
		goto out;

	/*
	 * If we passed all the tests above then the link is up and we no
	 * longer need to check for link.
	 */
	mac->get_link_status = FALSE;

out:
	return ret_val;
}

static int
igbvf_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *conf = &dev->data->dev_conf;

	PMD_INIT_LOG(DEBUG, "\nConfigured Virtual Function port id: %d\n",
		dev->data->port_id);

	/*
	 * The VF has no ability to enable/disable HW CRC stripping;
	 * keep the behavior consistent with the host PF.
	 */
#ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
	if (!conf->rxmode.hw_strip_crc) {
		PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip\n");
		conf->rxmode.hw_strip_crc = 1;
	}
#else
	if (conf->rxmode.hw_strip_crc) {
		PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip\n");
		conf->rxmode.hw_strip_crc = 0;
	}
#endif

	return 0;
}

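/*
 * Illustrative sketch, not part of the original driver: a VF user keeps
 * rxmode.hw_strip_crc consistent with the PF build-time choice shown
 * above. The queue counts are placeholders.
 */
static __rte_unused int
example_configure_vf_port(uint8_t port_id)
{
	struct rte_eth_conf port_conf;

	memset(&port_conf, 0, sizeof(port_conf));
	/* match the default PF behavior (CRC stripping enabled) */
	port_conf.rxmode.hw_strip_crc = 1;

	/* one Rx queue and one Tx queue, placeholder values */
	return rte_eth_dev_configure(port_id, 1, 1, &port_conf);
}
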
static int
igbvf_dev_start(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	PMD_INIT_LOG(DEBUG, "igbvf_dev_start");

	hw->mac.ops.reset_hw(hw);

	/* Set all vfta */
	igbvf_set_vfta_all(dev, 1);

	eth_igbvf_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	ret = eth_igbvf_rx_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		igb_dev_clear_queues(dev);
		return ret;
	}

	return 0;
}

static void
igbvf_dev_stop(struct rte_eth_dev *dev)
{
	PMD_INIT_LOG(DEBUG, "igbvf_dev_stop");

	igbvf_stop_adapter(dev);

	/*
	 * Clear what we set, but we still keep shadow_vfta to restore
	 * it after the device starts again.
	 */
	igbvf_set_vfta_all(dev, 0);

	igb_dev_clear_queues(dev);
}

static void
igbvf_dev_close(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_LOG(DEBUG, "igbvf_dev_close");

	e1000_reset_hw(hw);

	igbvf_dev_stop(dev);
}

static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on)
{
	struct e1000_mbx_info *mbx = &hw->mbx;
	uint32_t msgbuf[2];

	/* After setting a vlan, vlan strip will also be enabled in the igb driver */
	msgbuf[0] = E1000_VF_SET_VLAN;
	msgbuf[1] = vid;
	/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
	if (on)
		msgbuf[0] |= E1000_VF_SET_VLAN_ADD;

	return (mbx->ops.write_posted(hw, msgbuf, 2, 0));
}

static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	int i = 0, j = 0, vfta = 0, mask = 1;

	/* replay every bit recorded in the shadow VFTA to the PF mailbox */
	for (i = 0; i < IGB_VFTA_SIZE; i++) {
		vfta = shadow_vfta->vfta[i];
		if (vfta) {
			mask = 1;
			for (j = 0; j < 32; j++) {
				if (vfta & mask)
					igbvf_set_vfta(hw,
						(uint16_t)((i << 5) + j), on);
				mask <<= 1;
			}
		}
	}
}

static int
igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t vid_idx = 0;
	uint32_t vid_bit = 0;
	int ret = 0;

	PMD_INIT_LOG(DEBUG, "igbvf_vlan_filter_set");

	/* vind is not used in the VF driver, set to 0; see ixgbe_set_vfta_vf */
	ret = igbvf_set_vfta(hw, vlan_id, !!on);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to set VF vlan");
		return ret;
	}
	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));

	/* Save what we set and restore it after device reset */
	if (on)
		shadow_vfta->vfta[vid_idx] |= vid_bit;
	else
		shadow_vfta->vfta[vid_idx] &= ~vid_bit;

	return 0;
}

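/*
 * Illustrative sketch, not part of the original driver: applications
 * reach igbvf_vlan_filter_set() through the generic VLAN filter API.
 * VLAN 100 is a placeholder value.
 */
static __rte_unused int
example_vf_admit_vlan(uint8_t port_id)
{
	/* on = 1 adds the VLAN id to the VF's filter via the PF mailbox */
	return rte_eth_dev_vlan_filter(port_id, 100, 1);
}
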
static int
eth_igb_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta *reta_conf)
{
	uint8_t i, j, mask;
	uint32_t reta;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/*
	 * Update the Redirection Table RETA[n], n = 0..31. The redirection
	 * table has 128 entries spread across 32 registers.
	 */
	for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
		if (i < ETH_RSS_RETA_NUM_ENTRIES/2)
			mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
		else
			mask = (uint8_t)((reta_conf->mask_hi >>
				(i - ETH_RSS_RETA_NUM_ENTRIES/2)) & 0xF);
		if (mask != 0) {
			reta = 0;
			/* if all 4 entries were set, no need to read the RETA register */
			if (mask != 0xF)
				reta = E1000_READ_REG(hw, E1000_RETA(i >> 2));
			for (j = 0; j < 4; j++) {
				if (mask & (0x1 << j)) {
					if (mask != 0xF)
						reta &= ~(0xFF << 8 * j);
					reta |= reta_conf->reta[i + j] << 8 * j;
				}
			}
			E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta);
		}
	}

	return 0;
}

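/*
 * Illustrative sketch, not part of the original driver: build a full
 * 128-entry redirection table that alternates between two Rx queues
 * and hand it to eth_igb_rss_reta_update() via the generic API.
 */
static __rte_unused int
example_spread_rss_over_two_queues(uint8_t port_id)
{
	struct rte_eth_rss_reta reta_conf;
	uint16_t i;

	memset(&reta_conf, 0, sizeof(reta_conf));
	/* select all 128 entries: both 64-entry halves of the mask */
	reta_conf.mask_lo = ~0ULL;
	reta_conf.mask_hi = ~0ULL;
	for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i++)
		reta_conf.reta[i] = i % 2;	/* alternate queues 0 and 1 */

	return rte_eth_dev_rss_reta_update(port_id, &reta_conf);
}
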
static int
eth_igb_rss_reta_query(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta *reta_conf)
{
	uint8_t i, j, mask;
	uint32_t reta;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/*
	 * Read the Redirection Table RETA[n], n = 0..31. The redirection
	 * table has 128 entries spread across 32 registers.
	 */
	for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
		if (i < ETH_RSS_RETA_NUM_ENTRIES/2)
			mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
		else
			mask = (uint8_t)((reta_conf->mask_hi >>
				(i - ETH_RSS_RETA_NUM_ENTRIES/2)) & 0xF);

		if (mask != 0) {
			reta = E1000_READ_REG(hw, E1000_RETA(i >> 2));
			for (j = 0; j < 4; j++) {
				if (mask & (0x1 << j))
					reta_conf->reta[i + j] =
						(uint8_t)((reta >> 8 * j) & 0xFF);
			}
		}
	}

	return 0;
}

#define MAC_TYPE_FILTER_SUP(type)    do {\
	if ((type) != e1000_82580 && (type) != e1000_i350 &&\
		(type) != e1000_82576)\
		return -ENOSYS;\
} while (0)

/*
 * add the syn filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * filter: pointer to the filter that will be added.
 * rx_queue: the queue id the filter is assigned to.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
eth_igb_add_syn_filter(struct rte_eth_dev *dev,
			struct rte_syn_filter *filter, uint16_t rx_queue)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t synqf, rfctl;

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	if (rx_queue >= IGB_MAX_RX_QUEUE_NUM)
		return -EINVAL;

	synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
	if (synqf & E1000_SYN_FILTER_ENABLE)
		return -EINVAL;  /* the filter is already enabled. */

	synqf = (uint32_t)(((rx_queue << E1000_SYN_FILTER_QUEUE_SHIFT) &
		E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE);

	rfctl = E1000_READ_REG(hw, E1000_RFCTL);
	if (filter->hig_pri)
		rfctl |= E1000_RFCTL_SYNQFP;
	else
		rfctl &= ~E1000_RFCTL_SYNQFP;

	E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
	E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
	return 0;
}

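/*
 * Illustrative sketch, not part of the original driver: enable the SYN
 * filter so that all TCP SYN packets land on one dedicated queue; this
 * calls the static handler above directly for brevity.
 */
static __rte_unused int
example_steer_tcp_syn(struct rte_eth_dev *dev, uint16_t rx_queue)
{
	struct rte_syn_filter filter;

	memset(&filter, 0, sizeof(filter));
	/* give SYN matches precedence over other matching filters */
	filter.hig_pri = 1;

	return eth_igb_add_syn_filter(dev, &filter, rx_queue);
}
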
/*
 * remove the syn filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
eth_igb_remove_syn_filter(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	E1000_WRITE_REG(hw, E1000_SYNQF(0), 0);
	return 0;
}

/*
 * get the syn filter's info
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * filter: pointer to the filter that returns.
 * *rx_queue: pointer to the queue id the filter is assigned to.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
eth_igb_get_syn_filter(struct rte_eth_dev *dev,
			struct rte_syn_filter *filter, uint16_t *rx_queue)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t synqf, rfctl;

	MAC_TYPE_FILTER_SUP(hw->mac.type);
	synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
	if (synqf & E1000_SYN_FILTER_ENABLE) {
		rfctl = E1000_READ_REG(hw, E1000_RFCTL);
		filter->hig_pri = (rfctl & E1000_RFCTL_SYNQFP) ? 1 : 0;
		*rx_queue = (uint8_t)((synqf & E1000_SYN_FILTER_QUEUE) >>
				E1000_SYN_FILTER_QUEUE_SHIFT);
		return 0;
	}
	return -ENOENT;
}

/*
 * add an ethertype filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * index: the index the filter allocates.
 * filter: pointer to the filter that will be added.
 * rx_queue: the queue id the filter is assigned to.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
eth_igb_add_ethertype_filter(struct rte_eth_dev *dev, uint16_t index,
			struct rte_ethertype_filter *filter, uint16_t rx_queue)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t etqf;

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	if (index >= E1000_MAX_ETQF_FILTERS || rx_queue >= IGB_MAX_RX_QUEUE_NUM)
		return -EINVAL;

	etqf = E1000_READ_REG(hw, E1000_ETQF(index));
	if (etqf & E1000_ETQF_FILTER_ENABLE)
		return -EINVAL;  /* filter index is in use. */
	else
		etqf = 0;  /* clear all the register. */

	etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
	etqf |= (uint32_t)(filter->ethertype & E1000_ETQF_ETHERTYPE);
	etqf |= rx_queue << E1000_ETQF_QUEUE_SHIFT;

	if (filter->priority_en) {
		PMD_INIT_LOG(ERR, "vlan and priority (%d) are not supported"
			" in E1000.", filter->priority);
		return -EINVAL;
	}

	E1000_WRITE_REG(hw, E1000_ETQF(index), etqf);
	return 0;
}

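/*
 * Illustrative sketch, not part of the original driver: steer one
 * EtherType to its own queue. 0x88F7 (IEEE 1588/PTP over Ethernet) and
 * filter index 0 are placeholder choices.
 */
static __rte_unused int
example_steer_ptp_ethertype(struct rte_eth_dev *dev, uint16_t rx_queue)
{
	struct rte_ethertype_filter filter;

	memset(&filter, 0, sizeof(filter));
	filter.ethertype = 0x88F7; /* PTP over Ethernet */
	filter.priority_en = 0;    /* priority matching is rejected above */

	return eth_igb_add_ethertype_filter(dev, 0, &filter, rx_queue);
}
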
/*
 * remove an ethertype filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * index: the index the filter allocates.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
eth_igb_remove_ethertype_filter(struct rte_eth_dev *dev, uint16_t index)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	if (index >= E1000_MAX_ETQF_FILTERS)
		return -EINVAL;

	E1000_WRITE_REG(hw, E1000_ETQF(index), 0);
	return 0;
}

/*
 * get an ethertype filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * index: the index the filter allocates.
 * filter: pointer to the filter that will be gotten.
 * *rx_queue: pointer to the queue id the filter is assigned to.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
eth_igb_get_ethertype_filter(struct rte_eth_dev *dev, uint16_t index,
			struct rte_ethertype_filter *filter, uint16_t *rx_queue)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t etqf;

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	if (index >= E1000_MAX_ETQF_FILTERS)
		return -EINVAL;

	etqf = E1000_READ_REG(hw, E1000_ETQF(index));
	if (etqf & E1000_ETQF_FILTER_ENABLE) {
		filter->ethertype = etqf & E1000_ETQF_ETHERTYPE;
		filter->priority_en = 0;
		*rx_queue = (etqf & E1000_ETQF_QUEUE) >> E1000_ETQF_QUEUE_SHIFT;
		return 0;
	}
	return -ENOENT;
}

#define MAC_TYPE_FILTER_SUP_EXT(type)    do {\
	if ((type) != e1000_82580 && (type) != e1000_i350)\
		return -ENOSYS;\
} while (0)

/*
 * add a 2tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * index: the index the filter allocates.
 * filter: pointer to the filter that will be added.
 * rx_queue: the queue id the filter is assigned to.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
eth_igb_add_2tuple_filter(struct rte_eth_dev *dev, uint16_t index,
			struct rte_2tuple_filter *filter, uint16_t rx_queue)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ttqf, imir = 0;
	uint32_t imir_ext = 0;

	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

	if (index >= E1000_MAX_TTQF_FILTERS ||
		rx_queue >= IGB_MAX_RX_QUEUE_NUM ||
		filter->priority > E1000_2TUPLE_MAX_PRI)
		return -EINVAL;  /* filter index is out of range. */
	if (filter->tcp_flags > TCP_FLAG_ALL)
		return -EINVAL;  /* flags are invalid. */

	ttqf = E1000_READ_REG(hw, E1000_TTQF(index));
	if (ttqf & E1000_TTQF_QUEUE_ENABLE)
		return -EINVAL;  /* filter index is in use. */

	imir = (uint32_t)(filter->dst_port & E1000_IMIR_DSTPORT);
	if (filter->dst_port_mask == 1) /* 1b means not compare. */
		imir |= E1000_IMIR_PORT_BP;
	else
		imir &= ~E1000_IMIR_PORT_BP;

	imir |= filter->priority << E1000_IMIR_PRIORITY_SHIFT;

	ttqf = 0;
	ttqf |= E1000_TTQF_QUEUE_ENABLE;
	ttqf |= (uint32_t)(rx_queue << E1000_TTQF_QUEUE_SHIFT);
	ttqf |= (uint32_t)(filter->protocol & E1000_TTQF_PROTOCOL_MASK);
	if (filter->protocol_mask == 1)
		ttqf |= E1000_TTQF_MASK_ENABLE;
	else
		ttqf &= ~E1000_TTQF_MASK_ENABLE;

	imir_ext |= E1000_IMIR_EXT_SIZE_BP;
	/* tcp flags bits setting. */
	if (filter->tcp_flags & TCP_FLAG_ALL) {
		if (filter->tcp_flags & TCP_UGR_FLAG)
			imir_ext |= E1000_IMIR_EXT_CTRL_UGR;
		if (filter->tcp_flags & TCP_ACK_FLAG)
			imir_ext |= E1000_IMIR_EXT_CTRL_ACK;
		if (filter->tcp_flags & TCP_PSH_FLAG)
			imir_ext |= E1000_IMIR_EXT_CTRL_PSH;
		if (filter->tcp_flags & TCP_RST_FLAG)
			imir_ext |= E1000_IMIR_EXT_CTRL_RST;
		if (filter->tcp_flags & TCP_SYN_FLAG)
			imir_ext |= E1000_IMIR_EXT_CTRL_SYN;
		if (filter->tcp_flags & TCP_FIN_FLAG)
			imir_ext |= E1000_IMIR_EXT_CTRL_FIN;
		imir_ext &= ~E1000_IMIR_EXT_CTRL_BP;
	} else
		imir_ext |= E1000_IMIR_EXT_CTRL_BP;
	E1000_WRITE_REG(hw, E1000_IMIR(index), imir);
	E1000_WRITE_REG(hw, E1000_TTQF(index), ttqf);
	E1000_WRITE_REG(hw, E1000_IMIREXT(index), imir_ext);
	return 0;
}

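/*
 * Illustrative sketch, not part of the original driver: a 2-tuple filter
 * matching TCP traffic to one destination port. Port 179 and filter
 * index 0 are placeholder values; check rte_ethdev.h for the expected
 * byte order of dst_port.
 */
static __rte_unused int
example_steer_tcp_dst_port(struct rte_eth_dev *dev, uint16_t rx_queue)
{
	struct rte_2tuple_filter filter;

	memset(&filter, 0, sizeof(filter));
	filter.dst_port = 179;    /* example destination port */
	filter.dst_port_mask = 0; /* 0b means compare dst_port */
	filter.protocol = 6;      /* IPPROTO_TCP */
	filter.protocol_mask = 0; /* compare the protocol too */
	filter.priority = 1;
	filter.tcp_flags = 0;     /* don't match on TCP flags */

	return eth_igb_add_2tuple_filter(dev, 0, &filter, rx_queue);
}
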
/*
 * remove a 2tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * index: the index the filter allocates.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
eth_igb_remove_2tuple_filter(struct rte_eth_dev *dev,
			uint16_t index)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

	if (index >= E1000_MAX_TTQF_FILTERS)
		return -EINVAL;  /* filter index is out of range */

	E1000_WRITE_REG(hw, E1000_TTQF(index), 0);
	E1000_WRITE_REG(hw, E1000_IMIR(index), 0);
	E1000_WRITE_REG(hw, E1000_IMIREXT(index), 0);
	return 0;
}

/*
 * get a 2tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * index: the index the filter allocates.
 * filter: pointer to the filter that returns.
 * *rx_queue: pointer to the queue id the filter is assigned to.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
eth_igb_get_2tuple_filter(struct rte_eth_dev *dev, uint16_t index,
			struct rte_2tuple_filter *filter, uint16_t *rx_queue)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t imir, ttqf, imir_ext;

	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

	if (index >= E1000_MAX_TTQF_FILTERS)
		return -EINVAL;  /* filter index is out of range. */

	ttqf = E1000_READ_REG(hw, E1000_TTQF(index));
	if (ttqf & E1000_TTQF_QUEUE_ENABLE) {
		imir = E1000_READ_REG(hw, E1000_IMIR(index));
		filter->protocol = ttqf & E1000_TTQF_PROTOCOL_MASK;
		filter->protocol_mask = (ttqf & E1000_TTQF_MASK_ENABLE) ? 1 : 0;
		*rx_queue = (ttqf & E1000_TTQF_RX_QUEUE_MASK) >>
				E1000_TTQF_QUEUE_SHIFT;
		filter->dst_port = (uint16_t)(imir & E1000_IMIR_DSTPORT);
		filter->dst_port_mask = (imir & E1000_IMIR_PORT_BP) ? 1 : 0;
		filter->priority = (imir & E1000_IMIR_PRIORITY) >>
				E1000_IMIR_PRIORITY_SHIFT;

		imir_ext = E1000_READ_REG(hw, E1000_IMIREXT(index));
		if (!(imir_ext & E1000_IMIR_EXT_CTRL_BP)) {
			if (imir_ext & E1000_IMIR_EXT_CTRL_UGR)
				filter->tcp_flags |= TCP_UGR_FLAG;
			if (imir_ext & E1000_IMIR_EXT_CTRL_ACK)
				filter->tcp_flags |= TCP_ACK_FLAG;
			if (imir_ext & E1000_IMIR_EXT_CTRL_PSH)
				filter->tcp_flags |= TCP_PSH_FLAG;
			if (imir_ext & E1000_IMIR_EXT_CTRL_RST)
				filter->tcp_flags |= TCP_RST_FLAG;
			if (imir_ext & E1000_IMIR_EXT_CTRL_SYN)
				filter->tcp_flags |= TCP_SYN_FLAG;
			if (imir_ext & E1000_IMIR_EXT_CTRL_FIN)
				filter->tcp_flags |= TCP_FIN_FLAG;
		} else
			filter->tcp_flags = 0;
		return 0;
	}
	return -ENOENT;
}

/*
 * add a flex filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * index: the index the filter allocates.
 * filter: pointer to the filter that will be added.
 * rx_queue: the queue id the filter is assigned to.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
eth_igb_add_flex_filter(struct rte_eth_dev *dev, uint16_t index,
			struct rte_flex_filter *filter, uint16_t rx_queue)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t wufc, en_bits = 0;
	uint32_t queueing = 0;
	uint32_t reg_off = 0;
	uint8_t i, j = 0;

	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

	if (index >= E1000_MAX_FLEXIBLE_FILTERS)
		return -EINVAL;  /* filter index is out of range. */

	if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN ||
		filter->len % 8 != 0 ||
		filter->priority > E1000_MAX_FLEX_FILTER_PRI)
		return -EINVAL;

	wufc = E1000_READ_REG(hw, E1000_WUFC);
	en_bits = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << index);
	if ((wufc & en_bits) == en_bits)
		return -EINVAL;  /* the filter is in use. */

	E1000_WRITE_REG(hw, E1000_WUFC,
		wufc | E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << index));

	if (index < E1000_MAX_FHFT)
		reg_off = E1000_FHFT(index);
	else
		reg_off = E1000_FHFT_EXT(index - E1000_MAX_FHFT);

	for (i = 0; i < 16; i++) {
		E1000_WRITE_REG(hw, reg_off + i*4*4, filter->dwords[j]);
		E1000_WRITE_REG(hw, reg_off + (i*4+1)*4, filter->dwords[++j]);
		E1000_WRITE_REG(hw, reg_off + (i*4+2)*4,
				(uint32_t)filter->mask[i]);
		++j;
	}
	queueing |= filter->len |
		(rx_queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
		(filter->priority << E1000_FHFT_QUEUEING_PRIO_SHIFT);
	E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET, queueing);
	return 0;
}

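/*
 * Illustrative sketch, not part of the original driver: program flex
 * filter 0 to match the first 8 bytes of a frame against a fixed
 * pattern. As the write loop above shows, each mask byte covers one
 * pair of pattern dwords (8 bytes); all values here are placeholders.
 */
static __rte_unused int
example_add_flex_filter(struct rte_eth_dev *dev, uint16_t rx_queue)
{
	struct rte_flex_filter filter;

	memset(&filter, 0, sizeof(filter));
	filter.len = 8;                /* must be a non-zero multiple of 8 */
	filter.dwords[0] = 0xdeadbeef; /* example pattern, bytes 0..3 */
	filter.dwords[1] = 0x01020304; /* example pattern, bytes 4..7 */
	filter.mask[0] = 0xff;         /* compare all 8 pattern bytes */
	filter.priority = 0;

	return eth_igb_add_flex_filter(dev, 0, &filter, rx_queue);
}
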
/*
 * remove a flex filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * index: the index the filter allocates.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
eth_igb_remove_flex_filter(struct rte_eth_dev *dev,
			uint16_t index)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t wufc, reg_off = 0;
	uint8_t i;

	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

	if (index >= E1000_MAX_FLEXIBLE_FILTERS)
		return -EINVAL;  /* filter index is out of range. */

	wufc = E1000_READ_REG(hw, E1000_WUFC);
	E1000_WRITE_REG(hw, E1000_WUFC, wufc & (~(E1000_WUFC_FLX0 << index)));

	if (index < E1000_MAX_FHFT)
		reg_off = E1000_FHFT(index);
	else
		reg_off = E1000_FHFT_EXT(index - E1000_MAX_FHFT);

	for (i = 0; i < 64; i++)
		E1000_WRITE_REG(hw, reg_off + i*4, 0);
	return 0;
}

/*
 * get a flex filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * index: the index the filter allocates.
 * filter: pointer to the filter that returns.
 * *rx_queue: pointer to the queue id the filter is assigned to.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
eth_igb_get_flex_filter(struct rte_eth_dev *dev, uint16_t index,
			struct rte_flex_filter *filter, uint16_t *rx_queue)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t wufc, queueing, wufc_en = 0;
	uint8_t i, j = 0;

	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

	if (index >= E1000_MAX_FLEXIBLE_FILTERS)
		return -EINVAL;  /* filter index is out of range. */

	wufc = E1000_READ_REG(hw, E1000_WUFC);
	wufc_en = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << index);

	if ((wufc & wufc_en) == wufc_en) {
		uint32_t reg_off = 0;

		if (index < E1000_MAX_FHFT)
			reg_off = E1000_FHFT(index);
		else
			reg_off = E1000_FHFT_EXT(index - E1000_MAX_FHFT);

		for (i = 0; i < 16; i++, j = i * 2) {
			filter->dwords[j] =
				E1000_READ_REG(hw, reg_off + i*4*4);
			filter->dwords[j+1] =
				E1000_READ_REG(hw, reg_off + (i*4+1)*4);
			filter->mask[i] =
				E1000_READ_REG(hw, reg_off + (i*4+2)*4);
		}
		queueing = E1000_READ_REG(hw,
				reg_off + E1000_FHFT_QUEUEING_OFFSET);
		filter->len = queueing & E1000_FHFT_QUEUEING_LEN;
		filter->priority = (queueing & E1000_FHFT_QUEUEING_PRIO) >>
				E1000_FHFT_QUEUEING_PRIO_SHIFT;
		*rx_queue = (queueing & E1000_FHFT_QUEUEING_QUEUE) >>
				E1000_FHFT_QUEUEING_QUEUE_SHIFT;
		return 0;
	}
	return -ENOENT;
}

/*
 * add a 5tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * index: the index the filter allocates.
 * filter: pointer to the filter that will be added.
 * rx_queue: the queue id the filter is assigned to.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
eth_igb_add_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
			struct rte_5tuple_filter *filter, uint16_t rx_queue)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ftqf, spqf = 0;
	uint32_t imir = 0;
	uint32_t imir_ext = 0;

	if (hw->mac.type != e1000_82576)
		return -ENOSYS;

	if (index >= E1000_MAX_FTQF_FILTERS ||
		rx_queue >= IGB_MAX_RX_QUEUE_NUM_82576)
		return -EINVAL;  /* filter index is out of range. */

	ftqf = E1000_READ_REG(hw, E1000_FTQF(index));
	if (ftqf & E1000_FTQF_QUEUE_ENABLE)
		return -EINVAL;  /* filter index is in use. */

	ftqf = 0;
	ftqf |= filter->protocol & E1000_FTQF_PROTOCOL_MASK;
	if (filter->src_ip_mask == 1) /* 1b means not compare. */
		ftqf |= E1000_FTQF_SOURCE_ADDR_MASK;
	if (filter->dst_ip_mask == 1)
		ftqf |= E1000_FTQF_DEST_ADDR_MASK;
	if (filter->src_port_mask == 1)
		ftqf |= E1000_FTQF_SOURCE_PORT_MASK;
	if (filter->protocol_mask == 1)
		ftqf |= E1000_FTQF_PROTOCOL_COMP_MASK;
	ftqf |= (rx_queue << E1000_FTQF_QUEUE_SHIFT) & E1000_FTQF_QUEUE_MASK;
	ftqf |= E1000_FTQF_VF_MASK_EN;
	ftqf |= E1000_FTQF_QUEUE_ENABLE;
	E1000_WRITE_REG(hw, E1000_FTQF(index), ftqf);
	E1000_WRITE_REG(hw, E1000_DAQF(index), filter->dst_ip);
	E1000_WRITE_REG(hw, E1000_SAQF(index), filter->src_ip);

	spqf |= filter->src_port & E1000_SPQF_SRCPORT;
	E1000_WRITE_REG(hw, E1000_SPQF(index), spqf);

	imir |= (uint32_t)(filter->dst_port & E1000_IMIR_DSTPORT);
	if (filter->dst_port_mask == 1) /* 1b means not compare. */
		imir |= E1000_IMIR_PORT_BP;
	else
		imir &= ~E1000_IMIR_PORT_BP;
	imir |= filter->priority << E1000_IMIR_PRIORITY_SHIFT;

	imir_ext |= E1000_IMIR_EXT_SIZE_BP;
	/* tcp flags bits setting. */
	if (filter->tcp_flags & TCP_FLAG_ALL) {
		if (filter->tcp_flags & TCP_UGR_FLAG)
			imir_ext |= E1000_IMIR_EXT_CTRL_UGR;
		if (filter->tcp_flags & TCP_ACK_FLAG)
			imir_ext |= E1000_IMIR_EXT_CTRL_ACK;
		if (filter->tcp_flags & TCP_PSH_FLAG)
			imir_ext |= E1000_IMIR_EXT_CTRL_PSH;
		if (filter->tcp_flags & TCP_RST_FLAG)
			imir_ext |= E1000_IMIR_EXT_CTRL_RST;
		if (filter->tcp_flags & TCP_SYN_FLAG)
			imir_ext |= E1000_IMIR_EXT_CTRL_SYN;
		if (filter->tcp_flags & TCP_FIN_FLAG)
			imir_ext |= E1000_IMIR_EXT_CTRL_FIN;
	} else
		imir_ext |= E1000_IMIR_EXT_CTRL_BP;
	E1000_WRITE_REG(hw, E1000_IMIR(index), imir);
	E1000_WRITE_REG(hw, E1000_IMIREXT(index), imir_ext);
	return 0;
}

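/*
 * Illustrative sketch, not part of the original driver: a 5-tuple filter
 * (82576 only) that matches UDP traffic to one destination port,
 * ignoring addresses and source port. Port 4789 and filter index 0 are
 * placeholder values; check rte_ethdev.h for the expected byte order.
 */
static __rte_unused int
example_steer_udp_5tuple(struct rte_eth_dev *dev, uint16_t rx_queue)
{
	struct rte_5tuple_filter filter;

	memset(&filter, 0, sizeof(filter));
	filter.protocol = 17;     /* IPPROTO_UDP */
	filter.protocol_mask = 0; /* 0b means compare the protocol */
	filter.src_ip_mask = 1;   /* 1b means don't compare src ip */
	filter.dst_ip_mask = 1;   /* ignore dst ip as well */
	filter.src_port_mask = 1; /* ignore the source port */
	filter.dst_port = 4789;   /* example destination port */
	filter.dst_port_mask = 0; /* compare dst_port */
	filter.priority = 1;

	return eth_igb_add_5tuple_filter(dev, 0, &filter, rx_queue);
}
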
/*
 * remove a 5tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * index: the index the filter allocates.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
eth_igb_remove_5tuple_filter(struct rte_eth_dev *dev,
			uint16_t index)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->mac.type != e1000_82576)
		return -ENOSYS;

	if (index >= E1000_MAX_FTQF_FILTERS)
		return -EINVAL;  /* filter index is out of range. */

	E1000_WRITE_REG(hw, E1000_FTQF(index), 0);
	E1000_WRITE_REG(hw, E1000_DAQF(index), 0);
	E1000_WRITE_REG(hw, E1000_SAQF(index), 0);
	E1000_WRITE_REG(hw, E1000_SPQF(index), 0);
	E1000_WRITE_REG(hw, E1000_IMIR(index), 0);
	E1000_WRITE_REG(hw, E1000_IMIREXT(index), 0);
	return 0;
}

/*
 * get a 5tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * index: the index the filter allocates.
 * filter: pointer to the filter that returns.
 * *rx_queue: pointer to the queue id the filter is assigned to.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
eth_igb_get_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
			struct rte_5tuple_filter *filter, uint16_t *rx_queue)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t spqf, ftqf, imir, imir_ext;

	if (hw->mac.type != e1000_82576)
		return -ENOSYS;

	if (index >= E1000_MAX_FTQF_FILTERS)
		return -EINVAL;  /* filter index is out of range. */

	ftqf = E1000_READ_REG(hw, E1000_FTQF(index));
	if (ftqf & E1000_FTQF_QUEUE_ENABLE) {
		filter->src_ip_mask =
			(ftqf & E1000_FTQF_SOURCE_ADDR_MASK) ? 1 : 0;
		filter->dst_ip_mask =
			(ftqf & E1000_FTQF_DEST_ADDR_MASK) ? 1 : 0;
		filter->src_port_mask =
			(ftqf & E1000_FTQF_SOURCE_PORT_MASK) ? 1 : 0;
		filter->protocol_mask =
			(ftqf & E1000_FTQF_PROTOCOL_COMP_MASK) ? 1 : 0;
		filter->protocol =
			(uint8_t)ftqf & E1000_FTQF_PROTOCOL_MASK;
		*rx_queue = (uint16_t)((ftqf & E1000_FTQF_QUEUE_MASK) >>
				E1000_FTQF_QUEUE_SHIFT);

		spqf = E1000_READ_REG(hw, E1000_SPQF(index));
		filter->src_port = spqf & E1000_SPQF_SRCPORT;

		filter->dst_ip = E1000_READ_REG(hw, E1000_DAQF(index));
		filter->src_ip = E1000_READ_REG(hw, E1000_SAQF(index));

		imir = E1000_READ_REG(hw, E1000_IMIR(index));
		filter->dst_port_mask = (imir & E1000_IMIR_PORT_BP) ? 1 : 0;
		filter->dst_port = (uint16_t)(imir & E1000_IMIR_DSTPORT);
		filter->priority = (imir & E1000_IMIR_PRIORITY) >>
				E1000_IMIR_PRIORITY_SHIFT;

		imir_ext = E1000_READ_REG(hw, E1000_IMIREXT(index));
		if (!(imir_ext & E1000_IMIR_EXT_CTRL_BP)) {
			if (imir_ext & E1000_IMIR_EXT_CTRL_UGR)
				filter->tcp_flags |= TCP_UGR_FLAG;
			if (imir_ext & E1000_IMIR_EXT_CTRL_ACK)
				filter->tcp_flags |= TCP_ACK_FLAG;
			if (imir_ext & E1000_IMIR_EXT_CTRL_PSH)
				filter->tcp_flags |= TCP_PSH_FLAG;
			if (imir_ext & E1000_IMIR_EXT_CTRL_RST)
				filter->tcp_flags |= TCP_RST_FLAG;
			if (imir_ext & E1000_IMIR_EXT_CTRL_SYN)
				filter->tcp_flags |= TCP_SYN_FLAG;
			if (imir_ext & E1000_IMIR_EXT_CTRL_FIN)
				filter->tcp_flags |= TCP_FIN_FLAG;
		} else
			filter->tcp_flags = 0;
		return 0;
	}
	return -ENOENT;
}

static struct rte_driver pmd_igb_drv = {
	.type = PMD_PDEV,
	.init = rte_igb_pmd_init,
};

static struct rte_driver pmd_igbvf_drv = {
	.type = PMD_PDEV,
	.init = rte_igbvf_pmd_init,
};

PMD_REGISTER_DRIVER(pmd_igb_drv);
PMD_REGISTER_DRIVER(pmd_igbvf_drv);