/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>

#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>

#include "ixgbe_logs.h"
#include "ixgbe/ixgbe_api.h"
#include "ixgbe/ixgbe_vf.h"
#include "ixgbe/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
/*
 * High threshold controlling when to start sending XOFF frames. Must be at
 * least 8 bytes less than receive packet buffer size. This value is in units
 * of 1024 bytes.
 */
#define IXGBE_FC_HI    0x80

/*
 * Low threshold controlling when to start sending XON frames. This value is
 * in units of 1024 bytes.
 */
#define IXGBE_FC_LO    0x40

/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE 0x680
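
/*
 * Illustrative example (not part of the original driver): since the
 * thresholds above are in units of 1024 bytes, the defaults work out to
 *
 *     high water = 0x80 * 1024 = 128 KB   (start sending XOFF)
 *     low water  = 0x40 * 1024 =  64 KB   (resume with XON)
 *
 * i.e. XOFF pause frames start once roughly 128 KB of the Rx packet buffer
 * is in use, and XON is sent again when it drains below 64 KB.
 */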
#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
#define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */

#define IXGBE_MMW_SIZE_DEFAULT        0x4
#define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14

/*
 * Default values for RX/TX configuration
 */
#define IXGBE_DEFAULT_RX_FREE_THRESH  32
#define IXGBE_DEFAULT_RX_PTHRESH      8
#define IXGBE_DEFAULT_RX_HTHRESH      8
#define IXGBE_DEFAULT_RX_WTHRESH      0

#define IXGBE_DEFAULT_TX_FREE_THRESH  32
#define IXGBE_DEFAULT_TX_PTHRESH      32
#define IXGBE_DEFAULT_TX_HTHRESH      0
#define IXGBE_DEFAULT_TX_WTHRESH      0
#define IXGBE_DEFAULT_TX_RSBIT_THRESH 32

/* Bit shift and mask */
#define IXGBE_4_BIT_WIDTH  (CHAR_BIT / 2)
#define IXGBE_4_BIT_MASK   RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
#define IXGBE_8_BIT_WIDTH  CHAR_BIT
#define IXGBE_8_BIT_MASK   UINT8_MAX
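
/*
 * Illustrative example (assuming CHAR_BIT == 8): the definitions above
 * expand to
 *
 *     IXGBE_4_BIT_WIDTH = 4      IXGBE_4_BIT_MASK = 0x0f
 *     IXGBE_8_BIT_WIDTH = 8      IXGBE_8_BIT_MASK = 0xff
 *
 * i.e. RTE_LEN2MASK(4, uint8_t) builds the mask covering the low 4 bits.
 */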
#define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */

#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))

static int eth_ixgbe_dev_init(struct eth_driver *eth_drv,
		struct rte_eth_dev *eth_dev);
static int ixgbe_dev_configure(struct rte_eth_dev *dev);
static int ixgbe_dev_start(struct rte_eth_dev *dev);
static void ixgbe_dev_stop(struct rte_eth_dev *dev);
static int ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
static void ixgbe_dev_close(struct rte_eth_dev *dev);
static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
		int wait_to_complete);
static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats);
static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
		uint16_t queue_id,
		uint8_t stat_idx,
		uint8_t is_rx);
static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info);
static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info);
static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static void ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
		uint16_t queue, bool on);
static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
		int on);
static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf);
static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf);
static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_pfc_conf *pfc_conf);
static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
		void *param);
static void ixgbe_dev_interrupt_delayed_handler(void *param);
static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
		uint32_t index, uint32_t pool);
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct eth_driver *eth_drv,
		struct rte_eth_dev *eth_dev);
static int ixgbevf_dev_configure(struct rte_eth_dev *dev);
static int ixgbevf_dev_start(struct rte_eth_dev *dev);
static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
static void ixgbevf_dev_close(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats);
static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
		uint16_t queue, int on);
static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);

/* For Eth VMDQ APIs support */
static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr, uint8_t on);
static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
		uint16_t rx_mask, uint8_t on);
static int ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
static int ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
		uint64_t pool_mask, uint8_t vlan_on);
static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
		struct rte_eth_vmdq_mirror_conf *mirror_conf,
		uint8_t rule_id, uint8_t on);
static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
		uint8_t rule_id);

static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
		uint16_t queue_idx, uint16_t tx_rate);
static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
		uint16_t tx_rate, uint64_t q_msk);

static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr,
		uint32_t index, uint32_t pool);
static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int ixgbe_add_syn_filter(struct rte_eth_dev *dev,
		struct rte_syn_filter *filter, uint16_t rx_queue);
static int ixgbe_remove_syn_filter(struct rte_eth_dev *dev);
static int ixgbe_get_syn_filter(struct rte_eth_dev *dev,
		struct rte_syn_filter *filter, uint16_t *rx_queue);
static int ixgbe_add_ethertype_filter(struct rte_eth_dev *dev, uint16_t index,
		struct rte_ethertype_filter *filter, uint16_t rx_queue);
static int ixgbe_remove_ethertype_filter(struct rte_eth_dev *dev,
		uint16_t index);
static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev, uint16_t index,
		struct rte_ethertype_filter *filter, uint16_t *rx_queue);
static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
		struct rte_5tuple_filter *filter, uint16_t rx_queue);
static int ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
		uint16_t index);
static int ixgbe_get_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
		struct rte_5tuple_filter *filter, uint16_t *rx_queue);

static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
/*
 * Define VF stats macros for registers that are not "clear on read".
 */
#define UPDATE_VF_STAT(reg, last, cur)                          \
{                                                               \
	u32 latest = IXGBE_READ_REG(hw, reg);                   \
	cur += latest - last;                                   \
	last = latest;                                          \
}

#define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
{                                                                \
	u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
	u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
	u64 latest = ((new_msb << 32) | new_lsb);                \
	cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
	last = latest;                                           \
}
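
/*
 * Illustrative example (not part of the original driver): the
 * "+ 0x1000000000 ... & 0xFFFFFFFFF" arithmetic keeps the running total
 * correct across a 36-bit counter wrap. If the counter last read
 * 0xFFFFFFFFE and now reads 0x000000004, then
 *
 *     (0x1000000000 + 0x000000004 - 0xFFFFFFFFE) & 0xFFFFFFFFF = 6
 *
 * i.e. six units were counted even though the raw register value went
 * "backwards". When no wrap occurred, the added bias falls outside the
 * 36-bit mask and the expression reduces to (latest - last).
 */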
#define IXGBE_SET_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] |= 1 << bit;\
	} while (0)

#define IXGBE_CLEAR_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] &= ~(1 << bit);\
	} while (0)

#define IXGBE_GET_HWSTRIP(h, q, r) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(r) = (h)->bitmap[idx] >> bit & 1;\
	} while (0)
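
/*
 * Illustrative example (assuming 32-bit bitmap words, i.e.
 * sizeof((h)->bitmap[0]) * NBBY == 32): queue 35 maps to word index
 * 35 / 32 = 1 and bit 35 % 32 = 3, so IXGBE_SET_HWSTRIP(h, 35) sets bit 3
 * of bitmap[1], and IXGBE_GET_HWSTRIP(h, 35, r) reads it back into r.
 */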
/*
 * The set of PCI devices this driver supports
 */
static struct rte_pci_id pci_id_ixgbe_map[] = {

#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

{ .vendor_id = 0, /* sentinel */ },
};
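
/*
 * Illustrative note (not from the original source): rte_pci_dev_ids.h
 * expands every RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) entry it contains,
 * so including it under the #define above turns each declaration into an
 * array element, e.g.
 *
 *     {RTE_PCI_DEVICE(0x8086, 0x10FB)},  // hypothetical example: 82599 SFP
 *
 * and the zeroed sentinel entry terminates the table.
 */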
/*
 * The set of PCI devices this driver supports (for 82599 VF)
 */
static struct rte_pci_id pci_id_ixgbevf_map[] = {

#define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
{ .vendor_id = 0, /* sentinel */ },
};
static struct eth_dev_ops ixgbe_eth_dev_ops = {
	.dev_configure = ixgbe_dev_configure,
	.dev_start = ixgbe_dev_start,
	.dev_stop = ixgbe_dev_stop,
	.dev_set_link_up = ixgbe_dev_set_link_up,
	.dev_set_link_down = ixgbe_dev_set_link_down,
	.dev_close = ixgbe_dev_close,
	.promiscuous_enable = ixgbe_dev_promiscuous_enable,
	.promiscuous_disable = ixgbe_dev_promiscuous_disable,
	.allmulticast_enable = ixgbe_dev_allmulticast_enable,
	.allmulticast_disable = ixgbe_dev_allmulticast_disable,
	.link_update = ixgbe_dev_link_update,
	.stats_get = ixgbe_dev_stats_get,
	.stats_reset = ixgbe_dev_stats_reset,
	.queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
	.dev_infos_get = ixgbe_dev_info_get,
	.mtu_set = ixgbe_dev_mtu_set,
	.vlan_filter_set = ixgbe_vlan_filter_set,
	.vlan_tpid_set = ixgbe_vlan_tpid_set,
	.vlan_offload_set = ixgbe_vlan_offload_set,
	.vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
	.rx_queue_start = ixgbe_dev_rx_queue_start,
	.rx_queue_stop = ixgbe_dev_rx_queue_stop,
	.tx_queue_start = ixgbe_dev_tx_queue_start,
	.tx_queue_stop = ixgbe_dev_tx_queue_stop,
	.rx_queue_setup = ixgbe_dev_rx_queue_setup,
	.rx_queue_release = ixgbe_dev_rx_queue_release,
	.rx_queue_count = ixgbe_dev_rx_queue_count,
	.rx_descriptor_done = ixgbe_dev_rx_descriptor_done,
	.tx_queue_setup = ixgbe_dev_tx_queue_setup,
	.tx_queue_release = ixgbe_dev_tx_queue_release,
	.dev_led_on = ixgbe_dev_led_on,
	.dev_led_off = ixgbe_dev_led_off,
	.flow_ctrl_get = ixgbe_flow_ctrl_get,
	.flow_ctrl_set = ixgbe_flow_ctrl_set,
	.priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
	.mac_addr_add = ixgbe_add_rar,
	.mac_addr_remove = ixgbe_remove_rar,
	.uc_hash_table_set = ixgbe_uc_hash_table_set,
	.uc_all_hash_table_set = ixgbe_uc_all_hash_table_set,
	.mirror_rule_set = ixgbe_mirror_rule_set,
	.mirror_rule_reset = ixgbe_mirror_rule_reset,
	.set_vf_rx_mode = ixgbe_set_pool_rx_mode,
	.set_vf_rx = ixgbe_set_pool_rx,
	.set_vf_tx = ixgbe_set_pool_tx,
	.set_vf_vlan_filter = ixgbe_set_pool_vlan_filter,
	.set_queue_rate_limit = ixgbe_set_queue_rate_limit,
	.set_vf_rate_limit = ixgbe_set_vf_rate_limit,
	.fdir_add_signature_filter = ixgbe_fdir_add_signature_filter,
	.fdir_update_signature_filter = ixgbe_fdir_update_signature_filter,
	.fdir_remove_signature_filter = ixgbe_fdir_remove_signature_filter,
	.fdir_infos_get = ixgbe_fdir_info_get,
	.fdir_add_perfect_filter = ixgbe_fdir_add_perfect_filter,
	.fdir_update_perfect_filter = ixgbe_fdir_update_perfect_filter,
	.fdir_remove_perfect_filter = ixgbe_fdir_remove_perfect_filter,
	.fdir_set_masks = ixgbe_fdir_set_masks,
	.reta_update = ixgbe_dev_rss_reta_update,
	.reta_query = ixgbe_dev_rss_reta_query,
#ifdef RTE_NIC_BYPASS
	.bypass_init = ixgbe_bypass_init,
	.bypass_state_set = ixgbe_bypass_state_store,
	.bypass_state_show = ixgbe_bypass_state_show,
	.bypass_event_set = ixgbe_bypass_event_store,
	.bypass_event_show = ixgbe_bypass_event_show,
	.bypass_wd_timeout_set = ixgbe_bypass_wd_timeout_store,
	.bypass_wd_timeout_show = ixgbe_bypass_wd_timeout_show,
	.bypass_ver_show = ixgbe_bypass_ver_show,
	.bypass_wd_reset = ixgbe_bypass_wd_reset,
#endif /* RTE_NIC_BYPASS */
	.rss_hash_update = ixgbe_dev_rss_hash_update,
	.rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get,
	.add_syn_filter = ixgbe_add_syn_filter,
	.remove_syn_filter = ixgbe_remove_syn_filter,
	.get_syn_filter = ixgbe_get_syn_filter,
	.add_ethertype_filter = ixgbe_add_ethertype_filter,
	.remove_ethertype_filter = ixgbe_remove_ethertype_filter,
	.get_ethertype_filter = ixgbe_get_ethertype_filter,
	.add_5tuple_filter = ixgbe_add_5tuple_filter,
	.remove_5tuple_filter = ixgbe_remove_5tuple_filter,
	.get_5tuple_filter = ixgbe_get_5tuple_filter,
};
/*
 * dev_ops for virtual function; only the bare necessities for basic VF
 * operation have been implemented.
 */
static struct eth_dev_ops ixgbevf_eth_dev_ops = {
	.dev_configure = ixgbevf_dev_configure,
	.dev_start = ixgbevf_dev_start,
	.dev_stop = ixgbevf_dev_stop,
	.link_update = ixgbe_dev_link_update,
	.stats_get = ixgbevf_dev_stats_get,
	.stats_reset = ixgbevf_dev_stats_reset,
	.dev_close = ixgbevf_dev_close,
	.dev_infos_get = ixgbevf_dev_info_get,
	.mtu_set = ixgbevf_dev_set_mtu,
	.vlan_filter_set = ixgbevf_vlan_filter_set,
	.vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
	.vlan_offload_set = ixgbevf_vlan_offload_set,
	.rx_queue_setup = ixgbe_dev_rx_queue_setup,
	.rx_queue_release = ixgbe_dev_rx_queue_release,
	.tx_queue_setup = ixgbe_dev_tx_queue_setup,
	.tx_queue_release = ixgbe_dev_tx_queue_release,
	.mac_addr_add = ixgbevf_add_mac_addr,
	.mac_addr_remove = ixgbevf_remove_mac_addr,
};
/**
 * Atomically reads the link status information from global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to read from.
 *   - Pointer to the buffer to be saved with the link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

/**
 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to write to.
 *   - Pointer to the buffer holding the link status to be written.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
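
/*
 * Illustrative note (not from the original source): struct rte_eth_link
 * fits in 64 bits, so a single rte_atomic64_cmpset() copies the whole link
 * status in one atomic compare-and-swap. The swap only succeeds if *dst
 * still holds the value just read, which is why a concurrent update makes
 * cmpset return 0 and the helpers above report failure instead of storing
 * a torn value.
 */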
/*
 * This function is the same as ixgbe_is_sfp() in ixgbe/ixgbe.h.
 */
static inline int
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
	switch (hw->phy.type) {
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
		return 1;
	default:
		return 0;
	}
}

static inline int32_t
ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = ixgbe_reset_hw(hw);

	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	return status;
}

static void
ixgbe_enable_intr(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
	IXGBE_WRITE_FLUSH(hw);
}

/*
 * This function is based on ixgbe_disable_intr() in ixgbe/ixgbe.h.
 */
static void
ixgbe_disable_intr(struct ixgbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	if (hw->mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
	}
	IXGBE_WRITE_FLUSH(hw);
}

/*
 * This function resets queue statistics mapping registers.
 * From Niantic datasheet, Initialization of Statistics section:
 * "...if software requires the queue counters, the RQSMR and TQSM registers
 * must be re-programmed following a device reset."
 */
static void
ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
{
	uint32_t i;

	for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
	}
}

static int
ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
				  uint16_t queue_id,
				  uint8_t stat_idx,
				  uint8_t is_rx)
{
#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
#define NB_QMAP_FIELDS_PER_QSM_REG 4
#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f

	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_stat_mapping_registers *stat_mappings =
		IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
	uint32_t qsmr_mask = 0;
	uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
	uint32_t q_map;
	uint8_t n, offset;

	if ((hw->mac.type != ixgbe_mac_82599EB) &&
	    (hw->mac.type != ixgbe_mac_X540) &&
	    (hw->mac.type != ixgbe_mac_X550) &&
	    (hw->mac.type != ixgbe_mac_X550EM_x))
		return -ENOSYS;

	PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);

	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
	if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
		return -EIO;
	}
	offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);

	/* Now clear any previous stat_idx set */
	clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] &= ~clearing_mask;
	else
		stat_mappings->rqsmr[n] &= ~clearing_mask;

	q_map = (uint32_t)stat_idx;
	q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
	qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] |= qsmr_mask;
	else
		stat_mappings->rqsmr[n] |= qsmr_mask;

	PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);
	PMD_INIT_LOG(INFO, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
		     is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);

	/* Now write the mapping in the appropriate register */
	if (is_rx) {
		PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d",
			     stat_mappings->rqsmr[n], n);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
	} else {
		PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d",
			     stat_mappings->tqsm[n], n);
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
	}
	return 0;
}
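
/*
 * Illustrative example (not part of the original driver): each 32-bit
 * RQSMR/TQSM register packs four 8-bit queue-to-stat-counter fields.
 * Mapping queue_id 5 to stat_idx 2 on the Rx side gives
 *
 *     n         = 5 / 4 = 1                    (register RQSMR[1])
 *     offset    = 5 % 4 = 1                    (second byte of that register)
 *     qsmr_mask = 0x02 << (8 * 1) = 0x00000200
 *
 * so byte 1 of RQSMR[1] ends up holding stat index 2.
 */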
static void
ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
{
	struct ixgbe_stat_mapping_registers *stat_mappings =
		IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int i;

	/* write whatever was in stat mapping table to the NIC */
	for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
		/* rx */
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);
		/* tx */
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
	}
}

static void
ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
{
	uint8_t i;
	struct ixgbe_dcb_tc_config *tc;
	uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;

	dcb_config->num_tcs.pg_tcs = dcb_max_tc;
	dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
	for (i = 0; i < dcb_max_tc; i++) {
		tc = &dcb_config->tc_config[i];
		tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
		tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
			(uint8_t)(100 / dcb_max_tc + (i & 1));
		tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
		tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
			(uint8_t)(100 / dcb_max_tc + (i & 1));
		tc->pfc = ixgbe_dcb_pfc_disabled;
	}

	/* Initialize default user to priority mapping, UPx->TC0 */
	tc = &dcb_config->tc_config[0];
	tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
	tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
	for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
		dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
		dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
	}
	dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
	dcb_config->pfc_mode_enable = false;
	dcb_config->vt_mode = true;
	dcb_config->round_robin_enable = false;
	/* support all DCB capabilities in 82599 */
	dcb_config->support.capabilities = 0xFF;

	/* We only support 4 TCs for X540 and X550 */
	if (hw->mac.type == ixgbe_mac_X540 ||
	    hw->mac.type == ixgbe_mac_X550 ||
	    hw->mac.type == ixgbe_mac_X550EM_x) {
		dcb_config->num_tcs.pg_tcs = 4;
		dcb_config->num_tcs.pfc_tcs = 4;
	}
}

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
{
	uint16_t mask;

	/*
	 * Phy lock should not fail in this early stage. If this is the case,
	 * it is due to an improper exit of the application.
	 * So force the release of the faulty lock. Release of common lock
	 * is done automatically by swfw_sync function.
	 */
	mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
	if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
		PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
	}
	ixgbe_release_swfw_semaphore(hw, mask);

	/*
	 * These ones are more tricky since they are common to all ports; but
	 * swfw_sync retries last long enough (1s) to be almost sure that if
	 * lock can not be taken it is due to an improper lock of the
	 * semaphore.
	 */
	mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
	if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");
	}
	ixgbe_release_swfw_semaphore(hw, mask);
}
/*
 * This function is based on code in ixgbe_attach() in ixgbe/ixgbe.c.
 * It returns 0 on success.
 */
static int
eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
		struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_vfta *shadow_vfta =
		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
	struct ixgbe_hwstrip *hwstrip =
		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
	struct ixgbe_dcb_config *dcb_config =
		IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
	uint32_t ctrl_ext;
	uint16_t csum;
	int diag, i;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &ixgbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX and TX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct igb_tx_queue *txq;
		/* TX queue function in primary, set by last queue initialized
		 * Tx queue may not be initialized by primary process */
		if (eth_dev->data->tx_queues) {
			txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
			set_tx_function(eth_dev, txq);
		} else {
			/* Use default TX function if we get here */
			PMD_INIT_LOG(INFO, "No TX queues configured yet. "
				     "Using default TX function.");
		}

		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
		return 0;
	}
	pci_dev = eth_dev->pci_dev;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->allow_unsupported_sfp = 1;

	/* Initialize the shared code (base driver) */
#ifdef RTE_NIC_BYPASS
	diag = ixgbe_bypass_init_shared_code(hw);
#else
	diag = ixgbe_init_shared_code(hw);
#endif /* RTE_NIC_BYPASS */

	if (diag != IXGBE_SUCCESS) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
		return -EIO;
	}

	/* pick up the PCI bus settings for reporting later */
	ixgbe_get_bus_info(hw);

	/* Unlock any pending hardware semaphore */
	ixgbe_swfw_lock_reset(hw);

	/* Initialize DCB configuration */
	memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
	ixgbe_dcb_init(hw, dcb_config);
	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.current_mode = ixgbe_fc_full;
	hw->fc.pause_time = IXGBE_FC_PAUSE;
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		hw->fc.low_water[i] = IXGBE_FC_LO;
		hw->fc.high_water[i] = IXGBE_FC_HI;
	}

	/* Make sure we have a good EEPROM before we read from it */
	diag = ixgbe_validate_eeprom_checksum(hw, &csum);
	if (diag != IXGBE_SUCCESS) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
		return -EIO;
	}

#ifdef RTE_NIC_BYPASS
	diag = ixgbe_bypass_init_hw(hw);
#else
	diag = ixgbe_init_hw(hw);
#endif /* RTE_NIC_BYPASS */

	/*
	 * Devices with copper phys will fail to initialise if ixgbe_init_hw()
	 * is called too soon after the kernel driver unbinding/binding occurs.
	 * The failure occurs in ixgbe_identify_phy_generic() for all devices,
	 * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
	 * also called. See ixgbe_identify_phy_82599(). The reason for the
	 * failure is not known, and only occurs when virtualisation features
	 * are disabled in the BIOS. A delay of 100ms was found to be enough by
	 * trial-and-error, and is doubled to be safe.
	 */
	if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
		rte_delay_ms(200);
		diag = ixgbe_init_hw(hw);
	}

	if (diag == IXGBE_ERR_EEPROM_VERSION) {
		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
			     "LOM. Please be aware there may be issues associated "
			     "with your hardware.");
		PMD_INIT_LOG(ERR, "If you are experiencing problems "
			     "please contact your Intel or hardware representative "
			     "who provided you with this hardware.");
	} else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
	if (diag) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
		return -EIO;
	}

	/* disable interrupt */
	ixgbe_disable_intr(hw);

	/* reset mappings for queue statistics hw counters */
	ixgbe_reset_qstat_mappings(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
			hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %u bytes needed to store "
			"MAC addresses",
			ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}
	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
			IXGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %d bytes needed to store MAC addresses",
			ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
		return -ENOMEM;
	}

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap */
	memset(hwstrip, 0, sizeof(*hwstrip));

	/* initialize PF if max_vfs not zero */
	ixgbe_pf_host_init(eth_dev);

	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	/* let hardware know driver is loaded */
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
			     (int) hw->mac.type, (int) hw->phy.type,
			     (int) hw->phy.sfp_type);
	else
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			     (int) hw->mac.type, (int) hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(&(pci_dev->intr_handle),
			ixgbe_dev_interrupt_handler, (void *)eth_dev);

	/* enable uio intr after callback register */
	rte_intr_enable(&(pci_dev->intr_handle));

	/* enable support intr */
	ixgbe_enable_intr(eth_dev);

	return 0;
}
/*
 * Negotiate mailbox API version with the PF.
 * After reset API version is always set to the basic one (ixgbe_mbox_api_10).
 * Then we try to negotiate starting with the most recent one.
 * If all negotiation attempts fail, then we will proceed with
 * the default one (ixgbe_mbox_api_10).
 */
static void
ixgbevf_negotiate_api(struct ixgbe_hw *hw)
{
	int32_t i;

	/* start with highest supported, proceed down */
	static const enum ixgbe_pfvf_api_rev sup_ver[] = {
		ixgbe_mbox_api_11,
		ixgbe_mbox_api_10,
	};

	for (i = 0;
	     i != RTE_DIM(sup_ver) &&
	     ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
	     i++)
		;
}

static void
generate_random_mac_addr(struct ether_addr *mac_addr)
{
	uint64_t random;

	/* Set Organizationally Unique Identifier (OUI) prefix. */
	mac_addr->addr_bytes[0] = 0x00;
	mac_addr->addr_bytes[1] = 0x09;
	mac_addr->addr_bytes[2] = 0xC0;
	/* Force indication of locally assigned MAC address. */
	mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR;
	/* Generate the last 3 bytes of the MAC address with a random number. */
	random = rte_rand();
	memcpy(&mac_addr->addr_bytes[3], &random, 3);
}
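
/*
 * Illustrative note (assuming ETHER_LOCAL_ADMIN_ADDR == 0x02, as defined in
 * rte_ether.h): OR-ing it into the first byte turns the 00:09:C0 prefix
 * into 02:09:C0, i.e. it sets the locally-administered bit so the address
 * is not mistaken for a vendor-assigned OUI.
 */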
/*
 * Virtual Function device init
 */
static int
eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
		struct rte_eth_dev *eth_dev)
{
	int diag;
	uint32_t tc, tcs;
	struct rte_pci_device *pci_dev;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_vfta *shadow_vfta =
		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
	struct ixgbe_hwstrip *hwstrip =
		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
	struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
		return 0;
	}

	pci_dev = eth_dev->pci_dev;

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap */
	memset(hwstrip, 0, sizeof(*hwstrip));

	/* Initialize the shared code (base driver) */
	diag = ixgbe_init_shared_code(hw);
	if (diag != IXGBE_SUCCESS) {
		PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
		return -EIO;
	}

	/* init_mailbox_params */
	hw->mbx.ops.init_params(hw);

	/* Disable the interrupts for VF */
	ixgbevf_intr_disable(hw);

	hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
	diag = hw->mac.ops.reset_hw(hw);

	/*
	 * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when
	 * the underlying PF driver has not assigned a MAC address to the VF.
	 * In this case, assign a random MAC address.
	 */
	if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
		PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
		return diag;
	}

	/* negotiate mailbox API version to use with the PF. */
	ixgbevf_negotiate_api(hw);

	/* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
	ixgbevf_get_queues(hw, &tcs, &tc);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
			hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %u bytes needed to store "
			"MAC addresses",
			ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Generate a random MAC address, if none was assigned by PF. */
	if (is_zero_ether_addr(perm_addr)) {
		generate_random_mac_addr(perm_addr);
		diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
		if (diag) {
			rte_free(eth_dev->data->mac_addrs);
			eth_dev->data->mac_addrs = NULL;
			return diag;
		}
		PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
		PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
			     "%02x:%02x:%02x:%02x:%02x:%02x",
			     perm_addr->addr_bytes[0],
			     perm_addr->addr_bytes[1],
			     perm_addr->addr_bytes[2],
			     perm_addr->addr_bytes[3],
			     perm_addr->addr_bytes[4],
			     perm_addr->addr_bytes[5]);
	}

	/* Copy the permanent MAC address */
	ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);

	/* reset the hardware with the new settings */
	diag = hw->mac.ops.start_hw(hw);
	switch (diag) {
	case 0:
		break;
	default:
		PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
		return -EIO;
	}

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id, "ixgbe_mac_82599_vf");

	return 0;
}

static struct eth_driver rte_ixgbe_pmd = {
	{
		.name = "rte_ixgbe_pmd",
		.id_table = pci_id_ixgbe_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	},
	.eth_dev_init = eth_ixgbe_dev_init,
	.dev_private_size = sizeof(struct ixgbe_adapter),
};

/*
 * virtual function driver struct
 */
static struct eth_driver rte_ixgbevf_pmd = {
	{
		.name = "rte_ixgbevf_pmd",
		.id_table = pci_id_ixgbevf_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	},
	.eth_dev_init = eth_ixgbevf_dev_init,
	.dev_private_size = sizeof(struct ixgbe_adapter),
};

/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI IXGBE devices.
 */
static int
rte_ixgbe_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	rte_eth_driver_register(&rte_ixgbe_pmd);
	return 0;
}

/*
 * VF Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Virtual Poll Mode] Driver of PCI niantic devices.
 */
static int
rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	rte_eth_driver_register(&rte_ixgbevf_pmd);
	return 0;
}
static int
ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vfta *shadow_vfta =
		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}
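
/*
 * Illustrative example (not part of the original driver): the VFTA is a
 * 4096-bit table spread over 128 32-bit registers. For vlan_id 100,
 *
 *     vid_idx = (100 >> 5) & 0x7F = 3
 *     vid_bit = 1 << (100 & 0x1F) = 1 << 4
 *
 * so bit 4 of VFTA[3] controls whether VLAN 100 passes the filter.
 */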
static void
ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	if (on)
		ixgbe_vlan_hw_strip_enable(dev, queue);
	else
		ixgbe_vlan_hw_strip_disable(dev, queue);
}

static void
ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Only the high 16 bits are valid */
	IXGBE_WRITE_REG(hw, IXGBE_EXVET, tpid << 16);
}

static void
ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t vlnctrl;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Disable */
	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	vlnctrl &= ~IXGBE_VLNCTRL_VFE;

	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
}

static void
ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vfta *shadow_vfta =
		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t vlnctrl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Enable */
	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
	vlnctrl |= IXGBE_VLNCTRL_VFE;

	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);

	/* write whatever is in local vfta copy */
	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
}

static void
ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
	struct ixgbe_hwstrip *hwstrip =
		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);

	if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
		return;

	if (on)
		IXGBE_SET_HWSTRIP(hwstrip, queue);
	else
		IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
}

static void
ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* No queue level support */
		PMD_INIT_LOG(INFO, "82598EB does not support queue level hw strip");
		return;
	}

	/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
	ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
	ctrl &= ~IXGBE_RXDCTL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);

	/* record those setting for HW strip per queue */
	ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}

static void
ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* No queue level support */
		PMD_INIT_LOG(INFO, "82598EB does not support queue level hw strip");
		return;
	}

	/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
	ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
	ctrl |= IXGBE_RXDCTL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);

	/* record those setting for HW strip per queue */
	ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}

static void
ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ctrl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	if (hw->mac.type == ixgbe_mac_82598EB) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
		ctrl &= ~IXGBE_VLNCTRL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
	} else {
		/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
			ctrl &= ~IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);

			/* record those setting for HW strip per queue */
			ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
		}
	}
}

static void
ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ctrl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	if (hw->mac.type == ixgbe_mac_82598EB) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
		ctrl |= IXGBE_VLNCTRL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
	} else {
		/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
			ctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);

			/* record those setting for HW strip per queue */
			ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
		}
	}
}

static void
ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	/* DMATXCTRL: Generic Double VLAN Disable */
	ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
	ctrl &= ~IXGBE_DMATXCTL_GDV;
	IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);

	/* CTRL_EXT: Global Double VLAN Disable */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl &= ~IXGBE_EXTENDED_VLAN;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
}

static void
ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	/* DMATXCTRL: Generic Double VLAN Enable */
	ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
	ctrl |= IXGBE_DMATXCTL_GDV;
	IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);

	/* CTRL_EXT: Global Double VLAN Enable */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl |= IXGBE_EXTENDED_VLAN;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);

	/*
	 * The VET EXT field in the EXVET register is 0x8100 by default, so
	 * there is no need to change it. The same applies to the VT field of
	 * the DMATXCTL register.
	 */
}

static void
ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	if (mask & ETH_VLAN_STRIP_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
			ixgbe_vlan_hw_strip_enable_all(dev);
		else
			ixgbe_vlan_hw_strip_disable_all(dev);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
			ixgbe_vlan_hw_filter_enable(dev);
		else
			ixgbe_vlan_hw_filter_disable(dev);
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
			ixgbe_vlan_hw_extend_enable(dev);
		else
			ixgbe_vlan_hw_extend_disable(dev);
	}
}

static void
ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
	uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
}
static int
ixgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* set flag to update link status after init */
	intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;

	return 0;
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
ixgbe_dev_start(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
	int err, link_up = 0, negotiate = 0;
	uint32_t speed = 0;
	int mask = 0;
	int status;
	uint16_t vf, idx;

	PMD_INIT_FUNC_TRACE();

	/* IXGBE devices don't support half duplex */
	if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
	    (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
		PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
			     dev->data->dev_conf.link_duplex,
			     dev->data->port_id);
		return -EINVAL;
	}

	/* stop adapter */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);

	/* reinitialize adapter
	 * this calls reset and start */
	status = ixgbe_pf_reset_hw(hw);
	if (status != 0)
		return -1;
	hw->mac.ops.start_hw(hw);
	hw->mac.get_link_status = true;

	/* configure PF module if SRIOV enabled */
	ixgbe_pf_host_configure(dev);

	/* initialize transmission unit */
	ixgbe_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = ixgbe_dev_rx_init(dev);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		goto error;
	}

	ixgbe_dev_rxtx_start(dev);

	if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
		err = hw->mac.ops.setup_sfp(hw);
		if (err)
			goto error;
	}

	/* Turn on the laser */
	ixgbe_enable_tx_laser(hw);

	/* Skip link setup if loopback mode is enabled for 82599. */
	if (hw->mac.type == ixgbe_mac_82599EB &&
	    dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
		goto skip_link_setup;

	err = ixgbe_check_link(hw, &speed, &link_up, 0);
	if (err)
		goto error;
	err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
	if (err)
		goto error;

	switch (dev->data->dev_conf.link_speed) {
	case ETH_LINK_SPEED_AUTONEG:
		speed = (hw->mac.type != ixgbe_mac_82598EB) ?
			IXGBE_LINK_SPEED_82599_AUTONEG :
			IXGBE_LINK_SPEED_82598_AUTONEG;
		break;
	case ETH_LINK_SPEED_100:
		/*
		 * Invalid for 82598 but error will be detected by
		 * ixgbe_setup_link()
		 */
		speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	case ETH_LINK_SPEED_1000:
		speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case ETH_LINK_SPEED_10000:
		speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	default:
		PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu",
			     dev->data->dev_conf.link_speed,
			     dev->data->port_id);
		goto error;
	}

	err = ixgbe_setup_link(hw, speed, link_up);
	if (err)
		goto error;

skip_link_setup:

	/* check if lsc interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.lsc != 0)
		ixgbe_dev_lsc_interrupt_setup(dev);

	/* resume enabled intr since hw reset */
	ixgbe_enable_intr(dev);

	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
		ETH_VLAN_EXTEND_MASK;
	ixgbe_vlan_offload_set(dev, mask);

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
		/* Enable vlan filtering for VMDq */
		ixgbe_vmdq_vlan_hw_filter_enable(dev);
	}

	/* Configure DCB hw */
	ixgbe_configure_dcb(dev);

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		err = ixgbe_fdir_configure(dev);
		if (err)
			goto error;
	}

	/* Restore vf rate limit */
	if (vfinfo != NULL) {
		for (vf = 0; vf < dev->pci_dev->max_vfs; vf++)
			for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
				if (vfinfo[vf].tx_rate[idx] != 0)
					ixgbe_set_vf_rate_limit(dev, vf,
						vfinfo[vf].tx_rate[idx],
						1 << idx);
	}

	ixgbe_restore_statistics_mapping(dev);

	return 0;

error:
	PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
	ixgbe_dev_clear_queues(dev);
	return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void
ixgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
	int vf;

	PMD_INIT_FUNC_TRACE();

	/* disable interrupts */
	ixgbe_disable_intr(hw);

	/* reset the NIC */
	ixgbe_pf_reset_hw(hw);
	hw->adapter_stopped = FALSE;

	/* stop adapter */
	ixgbe_stop_adapter(hw);

	for (vf = 0; vfinfo != NULL &&
	     vf < dev->pci_dev->max_vfs; vf++)
		vfinfo[vf].clear_to_send = false;

	/* Turn off the laser */
	ixgbe_disable_tx_laser(hw);

	ixgbe_dev_clear_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_ixgbe_dev_atomic_write_link_status(dev, &link);
}
/*
 * Set device link up: enable tx laser.
 */
static int
ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (hw->mac.type == ixgbe_mac_82599EB) {
#ifdef RTE_NIC_BYPASS
		if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
			/* Not supported in bypass mode */
			PMD_INIT_LOG(ERR, "Set link up is not supported "
				     "by device id 0x%x", hw->device_id);
			return -ENOTSUP;
		}
#endif
		/* Turn on the laser */
		ixgbe_enable_tx_laser(hw);
		return 0;
	}

	PMD_INIT_LOG(ERR, "Set link up is not supported by device id 0x%x",
		     hw->device_id);
	return -ENOTSUP;
}

/*
 * Set device link down: disable tx laser.
 */
static int
ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (hw->mac.type == ixgbe_mac_82599EB) {
#ifdef RTE_NIC_BYPASS
		if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
			/* Not supported in bypass mode */
			PMD_INIT_LOG(ERR, "Set link down is not supported "
				     "by device id 0x%x", hw->device_id);
			return -ENOTSUP;
		}
#endif
		/* Turn off the laser */
		ixgbe_disable_tx_laser(hw);
		return 0;
	}

	PMD_INIT_LOG(ERR, "Set link down is not supported by device id 0x%x",
		     hw->device_id);
	return -ENOTSUP;
}

/*
 * Reset and stop device.
 */
static void
ixgbe_dev_close(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	ixgbe_pf_reset_hw(hw);

	ixgbe_dev_stop(dev);
	hw->adapter_stopped = 1;

	ixgbe_disable_pcie_master(hw);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
}
/*
 * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
 */
static void
ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_stats *hw_stats =
		IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	uint32_t bprc, lxon, lxoff, total;
	uint64_t total_missed_rx, total_qbrc, total_qprc;
	unsigned i;

	total_missed_rx = 0;
	total_qbrc = 0;
	total_qprc = 0;

	hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);

	for (i = 0; i < 8; i++) {
		uint32_t mp;
		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		/* global total per queue */
		hw_stats->mpc[i] += mp;
		/* Running comprehensive total for stats display */
		total_missed_rx += hw_stats->mpc[i];
		if (hw->mac.type == ixgbe_mac_82598EB)
			hw_stats->rnbc[i] +=
				IXGBE_READ_REG(hw, IXGBE_RNBC(i));
		hw_stats->pxontxc[i] +=
			IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		hw_stats->pxonrxc[i] +=
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
		hw_stats->pxofftxc[i] +=
			IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		hw_stats->pxoffrxc[i] +=
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		hw_stats->pxon2offc[i] +=
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	}
	for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
		hw_stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		hw_stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
		hw_stats->qbrc[i] +=
			((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
		hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
		hw_stats->qbtc[i] +=
			((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
		hw_stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));

		total_qprc += hw_stats->qprc[i];
		total_qbrc += hw_stats->qbrc[i];
	}
	hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
	hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
	hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);

	/* Note that gprc counts missed packets */
	hw_stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);

	if (hw->mac.type != ixgbe_mac_82598EB) {
		hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
		hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
		hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
		hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
		hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	hw_stats->bprc += bprc;
	hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	if (hw->mac.type == ixgbe_mac_82598EB)
		hw_stats->mprc -= bprc;

	hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	hw_stats->lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	hw_stats->lxofftxc += lxoff;
	total = lxon + lxoff;

	hw_stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
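	/*
	 * Descriptive note (not from the original source): XON/XOFF pause
	 * frames generated by the MAC itself appear to be counted in the
	 * GPTC/MPTC/PTC64 good-transmit counters, so they are subtracted
	 * below to leave only packets sent by software. Each pause frame is
	 * a minimum-size (64-byte) Ethernet frame, hence the
	 * "total * ETHER_MIN_LEN" octet adjustment.
	 */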
1821 hw_stats->gptc -= total;
1822 hw_stats->mptc -= total;
1823 hw_stats->ptc64 -= total;
1824 hw_stats->gotc -= total * ETHER_MIN_LEN;
1826 hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1827 hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1828 hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1829 hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1830 hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1831 hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1832 hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1833 hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1834 hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1835 hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1836 hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1837 hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1838 hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1839 hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1840 hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1841 hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1842 hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1843 hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1844 /* Only read FCOE on 82599 */
1845 if (hw->mac.type != ixgbe_mac_82598EB) {
1846 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1847 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1848 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1849 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1850 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1856 /* Fill out the rte_eth_stats statistics structure */
1857 stats->ipackets = total_qprc;
1858 stats->ibytes = total_qbrc;
1859 stats->opackets = hw_stats->gptc;
1860 stats->obytes = hw_stats->gotc;
1861 stats->imcasts = hw_stats->mprc;
1863 for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
1864 stats->q_ipackets[i] = hw_stats->qprc[i];
1865 stats->q_opackets[i] = hw_stats->qptc[i];
1866 stats->q_ibytes[i] = hw_stats->qbrc[i];
1867 stats->q_obytes[i] = hw_stats->qbtc[i];
1868 stats->q_errors[i] = hw_stats->qprdc[i];
1872 stats->ibadcrc = hw_stats->crcerrs;
1873 stats->ibadlen = hw_stats->rlec + hw_stats->ruc + hw_stats->roc;
1874 stats->imissed = total_missed_rx;
1875 stats->ierrors = stats->ibadcrc +
1878 hw_stats->illerrc + hw_stats->errbc;
1883 /* XON/XOFF pause frames */
1884 stats->tx_pause_xon = hw_stats->lxontxc;
1885 stats->rx_pause_xon = hw_stats->lxonrxc;
1886 stats->tx_pause_xoff = hw_stats->lxofftxc;
1887 stats->rx_pause_xoff = hw_stats->lxoffrxc;
1889 /* Flow Director Stats registers */
1890 hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
1891 hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
1892 stats->fdirmatch = hw_stats->fdirmatch;
1893 stats->fdirmiss = hw_stats->fdirmiss;
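/*
 * Applications reach this callback through the generic ethdev API. A
 * minimal usage sketch (assuming a started port with id 0):
 *
 *	struct rte_eth_stats stats;
 *
 *	rte_eth_stats_get(0, &stats);
 *	printf("rx: %"PRIu64" pkts, %"PRIu64" bytes\n",
 *		stats.ipackets, stats.ibytes);
 *
 * Because most of the underlying registers are clear-on-read, the driver
 * accumulates them into hw_stats, so repeated calls return running totals.
 */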
1897 ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
1899 struct ixgbe_hw_stats *stats =
1900 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1902 /* HW registers are cleared on read */
1903 ixgbe_dev_stats_get(dev, NULL);
1905 /* Reset software totals */
1906 memset(stats, 0, sizeof(*stats));
1910 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1912 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1913 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
1914 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1916 /* Good Rx packets, including VF loopback */
1917 UPDATE_VF_STAT(IXGBE_VFGPRC,
1918 hw_stats->last_vfgprc, hw_stats->vfgprc);
1920 /* Good Rx octets, including VF loopback */
1921 UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
1922 hw_stats->last_vfgorc, hw_stats->vfgorc);
1924 /* Good Tx packets, including VF loopback */
1925 UPDATE_VF_STAT(IXGBE_VFGPTC,
1926 hw_stats->last_vfgptc, hw_stats->vfgptc);
1928 /* Good Tx octets, including VF loopback */
1929 UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
1930 hw_stats->last_vfgotc, hw_stats->vfgotc);
1932 /* Rx Multicast Packets */
1933 UPDATE_VF_STAT(IXGBE_VFMPRC,
1934 hw_stats->last_vfmprc, hw_stats->vfmprc);
1939 memset(stats, 0, sizeof(*stats));
1940 stats->ipackets = hw_stats->vfgprc;
1941 stats->ibytes = hw_stats->vfgorc;
1942 stats->opackets = hw_stats->vfgptc;
1943 stats->obytes = hw_stats->vfgotc;
1944 stats->imcasts = hw_stats->vfmprc;
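/*
 * The UPDATE_VF_STAT and UPDATE_VF_STAT_36BIT macros used above are
 * defined earlier in this file. A minimal sketch of the technique they
 * implement (the helper name below is illustrative, not the driver's
 * own): VF counters are read-only and wrap around, so each read
 * accumulates the unsigned delta since the last snapshot into a 64-bit
 * software total. Unsigned subtraction keeps the delta correct across a
 * single 32-bit wrap:
 *
 *	static inline void
 *	vf_stat_update_sketch(uint32_t cur, uint32_t *last, uint64_t *total)
 *	{
 *		*total += (uint32_t)(cur - *last);
 *		*last = cur;
 *	}
 *
 * For the 36-bit octet counters, the LSB/MSB register pair is first
 * combined into one 64-bit value and the accumulated total is masked to
 * 36 bits.
 */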
1948 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
1950 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
1951 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1953 /* Sync HW registers with the last stats */
1954 ixgbevf_dev_stats_get(dev, NULL);
1956 /* Reset HW current stats */
1957 hw_stats->vfgprc = 0;
1958 hw_stats->vfgorc = 0;
1959 hw_stats->vfgptc = 0;
1960 hw_stats->vfgotc = 0;
1961 hw_stats->vfmprc = 0;
1966 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1968 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1970 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
1971 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
1972 dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
1973 dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
1974 dev_info->max_mac_addrs = hw->mac.num_rar_entries;
1975 dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
1976 dev_info->max_vfs = dev->pci_dev->max_vfs;
1977 if (hw->mac.type == ixgbe_mac_82598EB)
1978 dev_info->max_vmdq_pools = ETH_16_POOLS;
1980 dev_info->max_vmdq_pools = ETH_64_POOLS;
1981 dev_info->vmdq_queue_num = dev_info->max_rx_queues;
1982 dev_info->rx_offload_capa =
1983 DEV_RX_OFFLOAD_VLAN_STRIP |
1984 DEV_RX_OFFLOAD_IPV4_CKSUM |
1985 DEV_RX_OFFLOAD_UDP_CKSUM |
1986 DEV_RX_OFFLOAD_TCP_CKSUM;
1987 dev_info->tx_offload_capa =
1988 DEV_TX_OFFLOAD_VLAN_INSERT |
1989 DEV_TX_OFFLOAD_IPV4_CKSUM |
1990 DEV_TX_OFFLOAD_UDP_CKSUM |
1991 DEV_TX_OFFLOAD_TCP_CKSUM |
1992 DEV_TX_OFFLOAD_SCTP_CKSUM |
1993 DEV_TX_OFFLOAD_TCP_TSO;
1995 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1997 .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
1998 .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
1999 .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
2001 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
2005 dev_info->default_txconf = (struct rte_eth_txconf) {
2007 .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
2008 .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
2009 .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
2011 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
2012 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
2013 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
2014 ETH_TXQ_FLAGS_NOOFFLOADS,
2016 dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
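/*
 * Applications reach this callback through the generic ethdev API. A
 * minimal usage sketch (assuming an initialized port with id 0):
 *
 *	struct rte_eth_dev_info dev_info;
 *
 *	rte_eth_dev_info_get(0, &dev_info);
 *	printf("max rx queues: %u\n", dev_info.max_rx_queues);
 */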
2020 ixgbevf_dev_info_get(struct rte_eth_dev *dev,
2021 struct rte_eth_dev_info *dev_info)
2023 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2025 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
2026 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
2027 dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
2028 dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS reg */
2029 dev_info->max_mac_addrs = hw->mac.num_rar_entries;
2030 dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
2031 dev_info->max_vfs = dev->pci_dev->max_vfs;
2032 if (hw->mac.type == ixgbe_mac_82598EB)
2033 dev_info->max_vmdq_pools = ETH_16_POOLS;
2035 dev_info->max_vmdq_pools = ETH_64_POOLS;
2036 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
2037 DEV_RX_OFFLOAD_IPV4_CKSUM |
2038 DEV_RX_OFFLOAD_UDP_CKSUM |
2039 DEV_RX_OFFLOAD_TCP_CKSUM;
2040 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
2041 DEV_TX_OFFLOAD_IPV4_CKSUM |
2042 DEV_TX_OFFLOAD_UDP_CKSUM |
2043 DEV_TX_OFFLOAD_TCP_CKSUM |
2044 DEV_TX_OFFLOAD_SCTP_CKSUM;
2046 dev_info->default_rxconf = (struct rte_eth_rxconf) {
2048 .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
2049 .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
2050 .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
2052 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
2056 dev_info->default_txconf = (struct rte_eth_txconf) {
2058 .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
2059 .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
2060 .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
2062 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
2063 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
2064 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
2065 ETH_TXQ_FLAGS_NOOFFLOADS,
2069 /* Return 0 if the link status changed, -1 if it did not. */
2071 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2073 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2074 struct rte_eth_link link, old;
2075 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
2079 link.link_status = 0;
2080 link.link_speed = 0;
2081 link.link_duplex = 0;
2082 memset(&old, 0, sizeof(old));
2083 rte_ixgbe_dev_atomic_read_link_status(dev, &old);
2085 /* Use a no-wait link check if no wait was requested or the LSC interrupt is enabled */
2086 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
2087 diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
2089 diag = ixgbe_check_link(hw, &link_speed, &link_up, 1);
2091 link.link_speed = ETH_LINK_SPEED_100;
2092 link.link_duplex = ETH_LINK_HALF_DUPLEX;
2093 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2094 if (link.link_status == old.link_status)
2099 if (link_speed == IXGBE_LINK_SPEED_UNKNOWN &&
2100 !hw->mac.get_link_status) {
2101 memcpy(&link, &old, sizeof(link));
2106 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2107 if (link.link_status == old.link_status)
2111 link.link_status = 1;
2112 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2114 switch (link_speed) {
2116 case IXGBE_LINK_SPEED_UNKNOWN:
2117 link.link_duplex = ETH_LINK_HALF_DUPLEX;
2118 link.link_speed = ETH_LINK_SPEED_100;
2121 case IXGBE_LINK_SPEED_100_FULL:
2122 link.link_speed = ETH_LINK_SPEED_100;
2125 case IXGBE_LINK_SPEED_1GB_FULL:
2126 link.link_speed = ETH_LINK_SPEED_1000;
2129 case IXGBE_LINK_SPEED_10GB_FULL:
2130 link.link_speed = ETH_LINK_SPEED_10000;
2133 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2135 if (link.link_status == old.link_status)
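/*
 * Applications normally reach this callback through rte_eth_link_get()
 * (which may wait for completion) or rte_eth_link_get_nowait(). A
 * minimal usage sketch (assuming a started port with id 0):
 *
 *	struct rte_eth_link link;
 *
 *	rte_eth_link_get_nowait(0, &link);
 *	if (link.link_status)
 *		printf("link up at %u Mbps\n", (unsigned)link.link_speed);
 */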
2142 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
2144 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2147 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2148 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2149 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2153 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
2155 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2158 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2159 fctrl &= (~IXGBE_FCTRL_UPE);
2160 if (dev->data->all_multicast == 1)
2161 fctrl |= IXGBE_FCTRL_MPE;
2163 fctrl &= (~IXGBE_FCTRL_MPE);
2164 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2168 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
2170 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2173 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2174 fctrl |= IXGBE_FCTRL_MPE;
2175 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2179 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
2181 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2184 if (dev->data->promiscuous == 1)
2185 return; /* must remain in all_multicast mode */
2187 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2188 fctrl &= (~IXGBE_FCTRL_MPE);
2189 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
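/*
 * The four FCTRL-based callbacks above are reached through the generic
 * ethdev API. A minimal usage sketch (assuming a started port with id 0):
 *
 *	rte_eth_promiscuous_enable(0);   sets both FCTRL.UPE and FCTRL.MPE
 *	rte_eth_allmulticast_enable(0);  sets FCTRL.MPE only
 *
 * Since promiscuous mode relies on FCTRL.MPE as well, allmulticast
 * cannot be disabled while the port is still promiscuous, which is why
 * ixgbe_dev_allmulticast_disable() returns early in that case.
 */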
2193 * It clears the interrupt causes and enables the interrupt.
2194 * It is called only once, during NIC initialization.
2197 * Pointer to struct rte_eth_dev.
2200 * - On success, zero.
2201 * - On failure, a negative value.
2204 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
2206 struct ixgbe_interrupt *intr =
2207 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2209 ixgbe_dev_link_status_print(dev);
2210 intr->mask |= IXGBE_EICR_LSC;
2216 * It reads the ICR register and sets the IXGBE_EICR_LSC flag for link_update.
2219 * Pointer to struct rte_eth_dev.
2222 * - On success, zero.
2223 * - On failure, a negative value.
2226 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
2229 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2230 struct ixgbe_interrupt *intr =
2231 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2233 /* clear all cause mask */
2234 ixgbe_disable_intr(hw);
2236 /* read-on-clear nic registers here */
2237 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
2238 PMD_DRV_LOG(INFO, "eicr %x", eicr);
2241 if (eicr & IXGBE_EICR_LSC) {
2242 /* set flag for async link update */
2243 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2246 if (eicr & IXGBE_EICR_MAILBOX)
2247 intr->flags |= IXGBE_FLAG_MAILBOX;
2253 * It gets and then prints the link status.
2256 * Pointer to struct rte_eth_dev.
2259 * - On success, zero.
2260 * - On failure, a negative value.
2263 ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
2265 struct rte_eth_link link;
2267 memset(&link, 0, sizeof(link));
2268 rte_ixgbe_dev_atomic_read_link_status(dev, &link);
2269 if (link.link_status) {
2270 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2271 (int)(dev->data->port_id),
2272 (unsigned)link.link_speed,
2273 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
2274 "full-duplex" : "half-duplex");
2276 PMD_INIT_LOG(INFO, " Port %d: Link Down",
2277 (int)(dev->data->port_id));
2279 PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
2280 dev->pci_dev->addr.domain,
2281 dev->pci_dev->addr.bus,
2282 dev->pci_dev->addr.devid,
2283 dev->pci_dev->addr.function);
2287 * It executes link_update after knowing an interrupt occurred.
2290 * Pointer to struct rte_eth_dev.
2293 * - On success, zero.
2294 * - On failure, a negative value.
2297 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
2299 struct ixgbe_interrupt *intr =
2300 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2302 struct rte_eth_link link;
2303 int intr_enable_delay = false;
2305 PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2307 if (intr->flags & IXGBE_FLAG_MAILBOX) {
2308 ixgbe_pf_mbx_process(dev);
2309 intr->flags &= ~IXGBE_FLAG_MAILBOX;
2312 if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
2313 /* Get the link status before the update, to predict the new status later. */
2314 memset(&link, 0, sizeof(link));
2315 rte_ixgbe_dev_atomic_read_link_status(dev, &link);
2317 ixgbe_dev_link_update(dev, 0);
2320 if (!link.link_status)
2321 /* handle it 1 sec later, waiting for it to become stable */
2322 timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
2323 /* the link is likely going down */
2325 /* handle it 4 sec later, waiting for it to become stable */
2326 timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
2328 ixgbe_dev_link_status_print(dev);
2330 intr_enable_delay = true;
2333 if (intr_enable_delay) {
2334 if (rte_eal_alarm_set(timeout * 1000,
2335 ixgbe_dev_interrupt_delayed_handler, (void*)dev) < 0)
2336 PMD_DRV_LOG(ERR, "Error setting alarm");
2338 PMD_DRV_LOG(DEBUG, "enable intr immediately");
2339 ixgbe_enable_intr(dev);
2340 rte_intr_enable(&(dev->pci_dev->intr_handle));
2348 * Interrupt handler registered as an alarm callback for delayed handling
2349 * of a specific interrupt, waiting for the NIC state to become stable. As
2350 * the ixgbe interrupt state is not stable right after the link goes down,
2351 * it needs to wait 4 seconds before reading a stable status.
2354 * Pointer to interrupt handle.
2356 * The address of the parameter (struct rte_eth_dev *) registered before.
2362 ixgbe_dev_interrupt_delayed_handler(void *param)
2364 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2365 struct ixgbe_interrupt *intr =
2366 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2367 struct ixgbe_hw *hw =
2368 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2371 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
2372 if (eicr & IXGBE_EICR_MAILBOX)
2373 ixgbe_pf_mbx_process(dev);
2375 if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
2376 ixgbe_dev_link_update(dev, 0);
2377 intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
2378 ixgbe_dev_link_status_print(dev);
2379 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
2382 PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
2383 ixgbe_enable_intr(dev);
2384 rte_intr_enable(&(dev->pci_dev->intr_handle));
2388 * Interrupt handler triggered by NIC for handling
2389 * specific interrupt.
2392 * Pointer to interrupt handle.
2394 * The address of the parameter (struct rte_eth_dev *) registered before.
2400 ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
2403 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2404 ixgbe_dev_interrupt_get_status(dev);
2405 ixgbe_dev_interrupt_action(dev);
2409 ixgbe_dev_led_on(struct rte_eth_dev *dev)
2411 struct ixgbe_hw *hw;
2413 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2414 return (ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
2418 ixgbe_dev_led_off(struct rte_eth_dev *dev)
2420 struct ixgbe_hw *hw;
2422 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2423 return (ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
2427 ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2429 struct ixgbe_hw *hw;
2435 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2437 fc_conf->pause_time = hw->fc.pause_time;
2438 fc_conf->high_water = hw->fc.high_water[0];
2439 fc_conf->low_water = hw->fc.low_water[0];
2440 fc_conf->send_xon = hw->fc.send_xon;
2441 fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
2444 * Return rx_pause status according to actual setting of
2447 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2448 if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
2454 * Return tx_pause status according to actual setting of
2457 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
2458 if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY))
2463 if (rx_pause && tx_pause)
2464 fc_conf->mode = RTE_FC_FULL;
2466 fc_conf->mode = RTE_FC_RX_PAUSE;
2468 fc_conf->mode = RTE_FC_TX_PAUSE;
2470 fc_conf->mode = RTE_FC_NONE;
2476 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2478 struct ixgbe_hw *hw;
2480 uint32_t rx_buf_size;
2481 uint32_t max_high_water;
2483 enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
2490 PMD_INIT_FUNC_TRACE();
2492 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2493 if (fc_conf->autoneg != !hw->fc.disable_fc_autoneg)
2495 rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
2496 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2499 * Reserve at least one Ethernet frame for the watermark;
2500 * high_water/low_water are in kilobytes for ixgbe.
2502 max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
2503 if ((fc_conf->high_water > max_high_water) ||
2504 (fc_conf->high_water < fc_conf->low_water)) {
2505 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
2506 PMD_INIT_LOG(ERR, "high_water must be <= 0x%x", max_high_water);
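/*
 * Worked example, assuming the default 512 KB Rx packet buffer
 * (RXPBSIZE(0) = 0x80000) and an IXGBE_RXPBSIZE_SHIFT of 10:
 *
 *	max_high_water = (0x80000 - 1518) >> 10 = 510 (KB)
 *
 * so any high_water/low_water pair in KB with
 * low_water < high_water <= 510 passes the check above.
 */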
2510 hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
2511 hw->fc.pause_time = fc_conf->pause_time;
2512 hw->fc.high_water[0] = fc_conf->high_water;
2513 hw->fc.low_water[0] = fc_conf->low_water;
2514 hw->fc.send_xon = fc_conf->send_xon;
2516 err = ixgbe_fc_enable(hw);
2518 /* Not negotiated is not an error case */
2519 if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
2521 /* Check whether we want to forward MAC control frames; the driver has
2522 * no native capability to do that, so write the registers ourselves. */
2524 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2526 /* set or clear MFLCN.PMCF bit depending on configuration */
2527 if (fc_conf->mac_ctrl_frame_fwd != 0)
2528 mflcn |= IXGBE_MFLCN_PMCF;
2530 mflcn &= ~IXGBE_MFLCN_PMCF;
2532 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn);
2533 IXGBE_WRITE_FLUSH(hw);
2538 PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err);
2543 * ixgbe_dcb_pfc_enable_generic - Enable flow control
2544 * @hw: pointer to hardware structure
2545 * @tc_num: traffic class number
2546 * Enable flow control according to the current settings.
2549 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
2552 uint32_t mflcn_reg, fccfg_reg;
2554 uint32_t fcrtl, fcrth;
2558 /* Validate the water mark configuration */
2559 if (!hw->fc.pause_time) {
2560 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2564 /* Low water mark of zero causes XOFF floods */
2565 if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
2566 /* High/low water marks cannot be 0 */
2567 if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) {
2568 PMD_INIT_LOG(ERR, "Invalid water mark configuration");
2569 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2573 if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
2574 PMD_INIT_LOG(ERR, "Invalid water mark configuration");
2575 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2579 /* Negotiate the fc mode to use */
2580 ixgbe_fc_autoneg(hw);
2582 /* Disable any previous flow control settings */
2583 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2584 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE);
2586 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
2587 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
2589 switch (hw->fc.current_mode) {
2592 * If more than one RX priority flow control is enabled,
2593 * TX pause cannot be disabled.
2596 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2597 reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
2598 if (reg & IXGBE_FCRTH_FCEN)
2602 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
2604 case ixgbe_fc_rx_pause:
2606 * Rx Flow control is enabled and Tx Flow control is
2607 * disabled by software override. Since there really
2608 * isn't a way to advertise that we are capable of RX
2609 * Pause ONLY, we will advertise that we support both
2610 * symmetric and asymmetric Rx PAUSE. Later, we will
2611 * disable the adapter's ability to send PAUSE frames.
2613 mflcn_reg |= IXGBE_MFLCN_RPFCE;
2615 * If more than one RX priority flow control is enabled,
2616 * TX pause cannot be disabled.
2619 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2620 reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
2621 if (reg & IXGBE_FCRTH_FCEN)
2625 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
2627 case ixgbe_fc_tx_pause:
2629 * Tx Flow control is enabled, and Rx Flow control is
2630 * disabled by software override.
2632 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
2635 /* Flow control (both Rx and Tx) is enabled by SW override. */
2636 mflcn_reg |= IXGBE_MFLCN_RPFCE;
2637 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
2640 PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
2641 ret_val = IXGBE_ERR_CONFIG;
2646 /* Set 802.3x based flow control settings. */
2647 mflcn_reg |= IXGBE_MFLCN_DPF;
2648 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2649 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2651 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
2652 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2653 hw->fc.high_water[tc_num]) {
2654 fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE;
2655 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl);
2656 fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN;
2658 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0);
2660 * In order to prevent Tx hangs when the internal Tx
2661 * switch is enabled we must set the high water mark
2662 * to the maximum FCRTH value. This allows the Tx
2663 * switch to function even under heavy Rx workloads.
2665 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32;
2667 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth);
2669 /* Configure pause time (2 TCs per register) */
2670 reg = hw->fc.pause_time * 0x00010001;
2671 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
2672 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
2674 /* Configure flow control refresh threshold value */
2675 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
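/*
 * Worked example of the pause-time packing above: each 32-bit FCTTV
 * register holds two 16-bit pause-time values (one per TC), and
 * multiplying by 0x00010001 replicates the value into both halves.
 * With a pause time of 0x0680:
 *
 *	reg = 0x0680 * 0x00010001 = 0x06800680
 *
 * and the refresh threshold written to FCRTV is half the pause time,
 * 0x0340.
 */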
2682 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num)
2684 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2685 int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
2687 if (hw->mac.type != ixgbe_mac_82598EB) {
2688 ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
2694 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
2697 uint32_t rx_buf_size;
2698 uint32_t max_high_water;
2700 uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
2701 struct ixgbe_hw *hw =
2702 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2703 struct ixgbe_dcb_config *dcb_config =
2704 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
2706 enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
2713 PMD_INIT_FUNC_TRACE();
2715 ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
2716 tc_num = map[pfc_conf->priority];
2717 rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
2718 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2720 * At least reserve one Ethernet frame for watermark
2721 * high_water/low_water in kilo bytes for ixgbe
2723 max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
2724 if ((pfc_conf->fc.high_water > max_high_water) ||
2725 (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
2726 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
2727 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
2731 hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
2732 hw->fc.pause_time = pfc_conf->fc.pause_time;
2733 hw->fc.send_xon = pfc_conf->fc.send_xon;
2734 hw->fc.low_water[tc_num] = pfc_conf->fc.low_water;
2735 hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
2737 err = ixgbe_dcb_pfc_enable(dev, tc_num);
2739 /* Not negotiated is not an error case */
2740 if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
2743 PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
2748 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
2749 struct rte_eth_rss_reta_entry64 *reta_conf,
2754 uint16_t idx, shift;
2755 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2757 PMD_INIT_FUNC_TRACE();
2758 if (reta_size != ETH_RSS_RETA_SIZE_128) {
2759 PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
2760 "(%d) doesn't match the number the hardware can support "
2761 "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
2765 for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
2766 idx = i / RTE_RETA_GROUP_SIZE;
2767 shift = i % RTE_RETA_GROUP_SIZE;
2768 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2772 if (mask == IXGBE_4_BIT_MASK)
2775 r = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
2776 for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
2777 if (mask & (0x1 << j))
2778 reta |= reta_conf[idx].reta[shift + j] <<
2781 reta |= r & (IXGBE_8_BIT_MASK <<
2784 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
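/*
 * Worked example of the RETA packing above: the 128-entry redirection
 * table is spread over 32 registers, each holding four one-byte queue
 * indices, so entry i lives in byte (i % 4) of RETA(i >> 2). Writing
 * queues {3, 1, 4, 1} to entries 0..3 yields:
 *
 *	reta = 3 | (1 << 8) | (4 << 16) | (1 << 24) = 0x01040103
 *
 * written to RETA(0). When the 4-bit group mask is not all ones, the
 * read-modify-write path above preserves the untouched bytes.
 */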
2791 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
2792 struct rte_eth_rss_reta_entry64 *reta_conf,
2797 uint16_t idx, shift;
2798 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2800 PMD_INIT_FUNC_TRACE();
2801 if (reta_size != ETH_RSS_RETA_SIZE_128) {
2802 PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
2803 "(%d) doesn't match the number the hardware can support "
2804 "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
2808 for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IXGBE_4_BIT_WIDTH) {
2809 idx = i / RTE_RETA_GROUP_SIZE;
2810 shift = i % RTE_RETA_GROUP_SIZE;
2811 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2816 reta = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
2817 for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
2818 if (mask & (0x1 << j))
2819 reta_conf[idx].reta[shift + j] =
2820 ((reta >> (CHAR_BIT * j)) &
2829 ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
2830 uint32_t index, uint32_t pool)
2832 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2833 uint32_t enable_addr = 1;
2835 ixgbe_set_rar(hw, index, mac_addr->addr_bytes, pool, enable_addr);
2839 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
2841 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2843 ixgbe_clear_rar(hw, index);
2847 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2851 struct ixgbe_hw *hw;
2852 struct rte_eth_dev_info dev_info;
2853 uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2855 ixgbe_dev_info_get(dev, &dev_info);
2857 /* check that mtu is within the allowed range */
2858 if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
2861 /* Refuse an MTU that requires scattered packet support when that
2862 * feature has not already been enabled. */
2863 if (!dev->data->scattered_rx &&
2864 (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
2865 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
2868 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2869 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2871 /* switch to jumbo mode if needed */
2872 if (frame_size > ETHER_MAX_LEN) {
2873 dev->data->dev_conf.rxmode.jumbo_frame = 1;
2874 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
2876 dev->data->dev_conf.rxmode.jumbo_frame = 0;
2877 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
2879 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2881 /* update max frame size */
2882 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
2884 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
2885 maxfrs &= 0x0000FFFF;
2886 maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
2887 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
2893 * Virtual Function operations
2896 ixgbevf_intr_disable(struct ixgbe_hw *hw)
2898 PMD_INIT_FUNC_TRACE();
2900 /* Clear the interrupt mask to stop interrupts from being generated */
2901 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
2903 IXGBE_WRITE_FLUSH(hw);
2907 ixgbevf_dev_configure(struct rte_eth_dev *dev)
2909 struct rte_eth_conf *conf = &dev->data->dev_conf;
2911 PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
2912 dev->data->port_id);
2915 * The VF has no ability to enable/disable HW CRC stripping;
2916 * keep the behavior consistent with the host PF.
2918 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
2919 if (!conf->rxmode.hw_strip_crc) {
2920 PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip");
2921 conf->rxmode.hw_strip_crc = 1;
2924 if (conf->rxmode.hw_strip_crc) {
2925 PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip");
2926 conf->rxmode.hw_strip_crc = 0;
2934 ixgbevf_dev_start(struct rte_eth_dev *dev)
2936 struct ixgbe_hw *hw =
2937 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2940 PMD_INIT_FUNC_TRACE();
2942 hw->mac.ops.reset_hw(hw);
2943 hw->mac.get_link_status = true;
2945 /* negotiate mailbox API version to use with the PF. */
2946 ixgbevf_negotiate_api(hw);
2948 ixgbevf_dev_tx_init(dev);
2950 /* This can fail when allocating mbufs for descriptor rings */
2951 err = ixgbevf_dev_rx_init(dev);
2953 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
2954 ixgbe_dev_clear_queues(dev);
2959 ixgbevf_set_vfta_all(dev, 1);
2962 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
2963 ETH_VLAN_EXTEND_MASK;
2964 ixgbevf_vlan_offload_set(dev, mask);
2966 ixgbevf_dev_rxtx_start(dev);
2972 ixgbevf_dev_stop(struct rte_eth_dev *dev)
2974 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2976 PMD_INIT_FUNC_TRACE();
2978 hw->adapter_stopped = TRUE;
2979 ixgbe_stop_adapter(hw);
2982 * Clear what we set, but we still keep shadow_vfta to
2983 * restore after device starts
2985 ixgbevf_set_vfta_all(dev, 0);
2987 /* Clear stored conf */
2988 dev->data->scattered_rx = 0;
2990 ixgbe_dev_clear_queues(dev);
2994 ixgbevf_dev_close(struct rte_eth_dev *dev)
2996 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2998 PMD_INIT_FUNC_TRACE();
3002 ixgbevf_dev_stop(dev);
3004 /* reprogram the RAR[0] in case user changed it. */
3005 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
3008 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
3010 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3011 struct ixgbe_vfta *shadow_vfta =
3012 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
3013 int i = 0, j = 0, vfta = 0, mask = 1;
3015 for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
3016 vfta = shadow_vfta->vfta[i];
3019 for (j = 0; j < 32; j++) {
3021 ixgbe_set_vfta(hw, (i << 5) + j, 0, on);
3030 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3032 struct ixgbe_hw *hw =
3033 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3034 struct ixgbe_vfta *shadow_vfta =
3035 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
3036 uint32_t vid_idx = 0;
3037 uint32_t vid_bit = 0;
3040 PMD_INIT_FUNC_TRACE();
3042 /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
3043 ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on);
3045 PMD_INIT_LOG(ERR, "Unable to set VF vlan");
3048 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
3049 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
3051 /* Save what we set and restore it after device reset */
3053 shadow_vfta->vfta[vid_idx] |= vid_bit;
3055 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
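/*
 * Worked example of the shadow VFTA indexing above: the 4096 possible
 * VLAN ids map onto 128 32-bit words, so for vlan_id = 100:
 *
 *	vid_idx = (100 >> 5) & 0x7F = 3
 *	vid_bit = 1 << (100 & 0x1F) = 1 << 4
 *
 * i.e. bit 4 of shadow_vfta->vfta[3], mirroring the bit the hardware
 * sets in its own VFTA table.
 */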
3061 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
3063 struct ixgbe_hw *hw =
3064 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3067 PMD_INIT_FUNC_TRACE();
3069 if (queue >= hw->mac.max_rx_queues)
3072 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
3074 ctrl |= IXGBE_RXDCTL_VME;
3076 ctrl &= ~IXGBE_RXDCTL_VME;
3077 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
3079 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
3083 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
3085 struct ixgbe_hw *hw =
3086 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3090 /* The VF only supports the HW VLAN strip feature; others are not supported. */
3091 if (mask & ETH_VLAN_STRIP_MASK) {
3092 on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
3094 for (i = 0; i < hw->mac.max_rx_queues; i++)
3095 ixgbevf_vlan_strip_queue_set(dev, i, on);
3100 ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
3104 /* we only need to do this if VMDq is enabled */
3105 reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3106 if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
3107 PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting");
3115 ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr)
3117 uint32_t vector = 0;
3118 switch (hw->mac.mc_filter_type) {
3119 case 0: /* use bits [47:36] of the address */
3120 vector = ((uc_addr->addr_bytes[4] >> 4) |
3121 (((uint16_t)uc_addr->addr_bytes[5]) << 4));
3123 case 1: /* use bits [46:35] of the address */
3124 vector = ((uc_addr->addr_bytes[4] >> 3) |
3125 (((uint16_t)uc_addr->addr_bytes[5]) << 5));
3127 case 2: /* use bits [45:34] of the address */
3128 vector = ((uc_addr->addr_bytes[4] >> 2) |
3129 (((uint16_t)uc_addr->addr_bytes[5]) << 6));
3131 case 3: /* use bits [43:32] of the address */
3132 vector = ((uc_addr->addr_bytes[4]) |
3133 (((uint16_t)uc_addr->addr_bytes[5]) << 8));
3135 default: /* Invalid mc_filter_type */
3139 /* The vector can only be 12 bits wide, or the boundary will be exceeded. */
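/*
 * Worked example, assuming mc_filter_type 0 and a unicast address
 * ending in ...:ab:cd (addr_bytes[4] = 0xab, addr_bytes[5] = 0xcd):
 *
 *	vector = (0xab >> 4) | (0xcd << 4) = 0x0a | 0xcd0 = 0xcda
 *
 * which the caller then splits into a table index (vector >> 5) and a
 * bit position (vector & 0x1F) within the 128-word UTA table.
 */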
3145 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
3153 const uint32_t ixgbe_uta_idx_mask = 0x7F;
3154 const uint32_t ixgbe_uta_bit_shift = 5;
3155 const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
3156 const uint32_t bit1 = 0x1;
3158 struct ixgbe_hw *hw =
3159 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3160 struct ixgbe_uta_info *uta_info =
3161 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
3163 /* The UTA table only exists on 82599 hardware and newer */
3164 if (hw->mac.type < ixgbe_mac_82599EB)
3167 vector = ixgbe_uta_vector(hw, mac_addr);
3168 uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
3169 uta_shift = vector & ixgbe_uta_bit_mask;
3171 rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
3175 reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
3177 uta_info->uta_in_use++;
3178 reg_val |= (bit1 << uta_shift);
3179 uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
3181 uta_info->uta_in_use--;
3182 reg_val &= ~(bit1 << uta_shift);
3183 uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
3186 IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);
3188 if (uta_info->uta_in_use > 0)
3189 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
3190 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
3192 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
3198 ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
3201 struct ixgbe_hw *hw =
3202 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3203 struct ixgbe_uta_info *uta_info =
3204 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
3206 /* The UTA table only exists on 82599 hardware and newer */
3207 if (hw->mac.type < ixgbe_mac_82599EB)
3211 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
3212 uta_info->uta_shadow[i] = ~0;
3213 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
3216 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
3217 uta_info->uta_shadow[i] = 0;
3218 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3226 ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
3228 uint32_t new_val = orig_val;
3230 if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
3231 new_val |= IXGBE_VMOLR_AUPE;
3232 if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
3233 new_val |= IXGBE_VMOLR_ROMPE;
3234 if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
3235 new_val |= IXGBE_VMOLR_ROPE;
3236 if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
3237 new_val |= IXGBE_VMOLR_BAM;
3238 if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
3239 new_val |= IXGBE_VMOLR_MPE;
3245 ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
3246 uint16_t rx_mask, uint8_t on)
3250 struct ixgbe_hw *hw =
3251 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3252 uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
3254 if (hw->mac.type == ixgbe_mac_82598EB) {
3255 PMD_INIT_LOG(ERR, "setting the VF receive mode should only be done"
3256 " on 82599 hardware and newer");
3259 if (ixgbe_vmdq_mode_check(hw) < 0)
3262 val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
3269 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
3275 ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
3279 const uint8_t bit1 = 0x1;
3281 struct ixgbe_hw *hw =
3282 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3284 if (ixgbe_vmdq_mode_check(hw) < 0)
3287 addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2);
3288 reg = IXGBE_READ_REG(hw, addr);
3296 IXGBE_WRITE_REG(hw, addr, reg);
3302 ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
3306 const uint8_t bit1 = 0x1;
3308 struct ixgbe_hw *hw =
3309 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3311 if (ixgbe_vmdq_mode_check(hw) < 0)
3314 addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2);
3315 reg = IXGBE_READ_REG(hw, addr);
3323 IXGBE_WRITE_REG(hw, addr, reg);
3329 ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
3330 uint64_t pool_mask, uint8_t vlan_on)
3334 struct ixgbe_hw *hw =
3335 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3337 if (ixgbe_vmdq_mode_check(hw) < 0)
3339 for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
3340 if (pool_mask & ((uint64_t)(1ULL << pool_idx)))
3341 ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx, vlan_on);
3350 ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
3351 struct rte_eth_vmdq_mirror_conf *mirror_conf,
3352 uint8_t rule_id, uint8_t on)
3354 uint32_t mr_ctl, vlvf;
3355 uint32_t mp_lsb = 0;
3356 uint32_t mv_msb = 0;
3357 uint32_t mv_lsb = 0;
3358 uint32_t mp_msb = 0;
3361 uint64_t vlan_mask = 0;
3363 const uint8_t pool_mask_offset = 32;
3364 const uint8_t vlan_mask_offset = 32;
3365 const uint8_t dst_pool_offset = 8;
3366 const uint8_t rule_mr_offset = 4;
3367 const uint8_t mirror_rule_mask = 0x0F;
3369 struct ixgbe_mirror_info *mr_info =
3370 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
3371 struct ixgbe_hw *hw =
3372 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3374 if (ixgbe_vmdq_mode_check(hw) < 0)
3377 /* Check if vlan mask is valid */
3378 if ((mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) && (on)) {
3379 if (mirror_conf->vlan.vlan_mask == 0)
3383 /* Check if the vlan id is valid and find the corresponding VLAN ID index in VLVF */
3384 if (mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) {
3385 for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
3386 if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
3387 /* search the pool vlan filter index related to this vlan id */
3388 reg_index = ixgbe_find_vlvf_slot(hw,
3389 mirror_conf->vlan.vlan_id[i]);
3392 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
3393 if ((vlvf & IXGBE_VLVF_VIEN) &&
3394 ((vlvf & IXGBE_VLVF_VLANID_MASK)
3395 == mirror_conf->vlan.vlan_id[i]))
3396 vlan_mask |= (1ULL << reg_index);
3403 mv_lsb = vlan_mask & 0xFFFFFFFF;
3404 mv_msb = vlan_mask >> vlan_mask_offset;
3406 mr_info->mr_conf[rule_id].vlan.vlan_mask =
3407 mirror_conf->vlan.vlan_mask;
3408 for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
3409 if (mirror_conf->vlan.vlan_mask & (1ULL << i))
3410 mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
3411 mirror_conf->vlan.vlan_id[i];
3416 mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
3417 for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
3418 mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
3423 * If pool mirroring is enabled, write the related pool mask register;
3424 * if it is disabled, clear the PFMRVM register.
3426 if (mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) {
3428 mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
3429 mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
3430 mr_info->mr_conf[rule_id].pool_mask =
3431 mirror_conf->pool_mask;
3436 mr_info->mr_conf[rule_id].pool_mask = 0;
3440 /* read mirror control register and recalculate it */
3441 mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
3444 mr_ctl |= mirror_conf->rule_type_mask;
3445 mr_ctl &= mirror_rule_mask;
3446 mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
3448 mr_ctl &= ~(mirror_conf->rule_type_mask & mirror_rule_mask);
3450 mr_info->mr_conf[rule_id].rule_type_mask = (uint8_t)(mr_ctl & mirror_rule_mask);
3451 mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
3453 /* write mirror control register */
3454 IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
3456 /* write pool mirror control register */
3457 if (mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) {
3458 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
3459 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
3462 /* write VLAN mirror control register */
3463 if (mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) {
3464 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
3465 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
3473 ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
3476 uint32_t lsb_val = 0;
3477 uint32_t msb_val = 0;
3478 const uint8_t rule_mr_offset = 4;
3480 struct ixgbe_hw *hw =
3481 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3482 struct ixgbe_mirror_info *mr_info =
3483 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
3485 if (ixgbe_vmdq_mode_check(hw) < 0)
3488 memset(&mr_info->mr_conf[rule_id], 0,
3489 sizeof(struct rte_eth_vmdq_mirror_conf));
3491 /* clear PFVMCTL register */
3492 IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
3494 /* clear pool mask register */
3495 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
3496 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
3498 /* clear vlan mask register */
3499 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
3500 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);
3505 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
3506 uint16_t queue_idx, uint16_t tx_rate)
3508 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3509 uint32_t rf_dec, rf_int;
3511 uint16_t link_speed = dev->data->dev_link.link_speed;
3513 if (queue_idx >= hw->mac.max_tx_queues)
3517 /* Calculate the rate factor values to set */
3518 rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
3519 rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
3520 rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
3522 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
3523 bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
3524 IXGBE_RTTBCNRC_RF_INT_MASK_M);
3525 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
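/*
 * Worked example of the rate-factor arithmetic above, assuming a
 * 10000 Mbps link, a 300 Mbps tx_rate and an
 * IXGBE_RTTBCNRC_RF_INT_SHIFT of 14:
 *
 *	rf_int = 10000 / 300 = 33
 *	rf_dec = ((10000 % 300) << 14) / 300 = (100 << 14) / 300 = 5461
 *
 * giving a fixed-point rate factor of about 33.33; the hardware divides
 * the link rate by this factor to pace the queue at the requested rate.
 */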
3531 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
3532 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
3535 if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
3536 (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
3537 IXGBE_MAX_JUMBO_FRAME_SIZE))
3538 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
3539 IXGBE_MMW_SIZE_JUMBO_FRAME);
3541 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
3542 IXGBE_MMW_SIZE_DEFAULT);
3544 /* Set RTTBCNRC of queue X */
3545 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
3546 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
3547 IXGBE_WRITE_FLUSH(hw);
3552 static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
3553 uint16_t tx_rate, uint64_t q_msk)
3555 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3556 struct ixgbe_vf_info *vfinfo =
3557 *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
3558 uint8_t nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
3559 uint32_t queue_stride =
3560 IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
3561 uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx;
3562 uint32_t queue_end = queue_idx + nb_q_per_pool - 1;
3563 uint16_t total_rate = 0;
3565 if (queue_end >= hw->mac.max_tx_queues)
3568 if (vfinfo != NULL) {
3569 for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) {
3572 for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
3574 total_rate += vfinfo[vf_idx].tx_rate[idx];
3579 /* Store tx_rate for this vf. */
3580 for (idx = 0; idx < nb_q_per_pool; idx++) {
3581 if (((uint64_t)0x1 << idx) & q_msk) {
3582 if (vfinfo[vf].tx_rate[idx] != tx_rate)
3583 vfinfo[vf].tx_rate[idx] = tx_rate;
3584 total_rate += tx_rate;
3588 if (total_rate > dev->data->dev_link.link_speed) {
3590 * Reset stored TX rate of the VF if it causes exceed
3593 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
3597 /* Set RTTBCNRC of each queue/pool for vf X */
3598 for (; queue_idx <= queue_end; queue_idx++) {
3600 ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
3608 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
3609 __attribute__((unused)) uint32_t index,
3610 __attribute__((unused)) uint32_t pool)
3612 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3616 * On an 82599 VF, adding the same MAC address again is not an idempotent
3617 * operation. Trap this case to avoid exhausting the [very limited]
3618 * set of PF resources used to store VF MAC addresses.
3620 if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
3622 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
3625 PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag);
3629 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
3631 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3632 struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
3633 struct ether_addr *mac_addr;
3638 * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
3639 * not support the deletion of a given MAC address.
3640 * Instead, it requires deleting all MAC addresses, then adding back
3641 * all MAC addresses except the one to be deleted.
3643 (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);
3646 * Add back all MAC addresses, with the exception of the deleted one
3647 * and the permanent MAC address.
3649 for (i = 0, mac_addr = dev->data->mac_addrs;
3650 i < hw->mac.num_rar_entries; i++, mac_addr++) {
3651 /* Skip the deleted MAC address */
3654 /* Skip NULL MAC addresses */
3655 if (is_zero_ether_addr(mac_addr))
3657 /* Skip the permanent MAC address */
3658 if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
3660 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
3663 "Adding again MAC address "
3664 "%02x:%02x:%02x:%02x:%02x:%02x failed "
3666 mac_addr->addr_bytes[0],
3667 mac_addr->addr_bytes[1],
3668 mac_addr->addr_bytes[2],
3669 mac_addr->addr_bytes[3],
3670 mac_addr->addr_bytes[4],
3671 mac_addr->addr_bytes[5],
3680 * dev: Pointer to struct rte_eth_dev.
3681 * filter: pointer to the filter that will be added.
3682 * rx_queue: the queue id the filter is assigned to.
3685 * - On success, zero.
3686 * - On failure, a negative value.
3689 ixgbe_add_syn_filter(struct rte_eth_dev *dev,
3690 struct rte_syn_filter *filter, uint16_t rx_queue)
3692 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3695 if (hw->mac.type != ixgbe_mac_82599EB)
3698 if (rx_queue >= IXGBE_MAX_RX_QUEUE_NUM)
3701 synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
3703 if (synqf & IXGBE_SYN_FILTER_ENABLE)
3706 synqf = (uint32_t)(((rx_queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
3707 IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
3709 if (filter->hig_pri)
3710 synqf |= IXGBE_SYN_FILTER_SYNQFP;
3712 synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
3714 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
3722 * dev: Pointer to struct rte_eth_dev.
3725 * - On success, zero.
3726 * - On failure, a negative value.
3729 ixgbe_remove_syn_filter(struct rte_eth_dev *dev)
3731 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3734 if (hw->mac.type != ixgbe_mac_82599EB)
3737 synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
3739 synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
3741 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
3746 * get the syn filter's info
3749 * dev: Pointer to struct rte_eth_dev.
3750 * filter: pointer to the filter that is returned.
3751 * *rx_queue: pointer to the queue id the filter is assigned to.
3754 * - On success, zero.
3755 * - On failure, a negative value.
3758 ixgbe_get_syn_filter(struct rte_eth_dev *dev,
3759 struct rte_syn_filter *filter, uint16_t *rx_queue)
3762 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3765 if (hw->mac.type != ixgbe_mac_82599EB)
3768 synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
3769 if (synqf & IXGBE_SYN_FILTER_ENABLE) {
3770 filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
3771 *rx_queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);
3778 * add an ethertype filter
3781 * dev: Pointer to struct rte_eth_dev.
3782 * index: the index the filter allocates.
3783 * filter: pointer to the filter that will be added.
3784 * rx_queue: the queue id the filter is assigned to.
3787 * - On success, zero.
3788 * - On failure, a negative value.
3791 ixgbe_add_ethertype_filter(struct rte_eth_dev *dev,
3792 uint16_t index, struct rte_ethertype_filter *filter,
3795 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3796 uint32_t etqf, etqs = 0;
3798 if (hw->mac.type != ixgbe_mac_82599EB)
3801 if (index >= IXGBE_MAX_ETQF_FILTERS ||
3802 rx_queue >= IXGBE_MAX_RX_QUEUE_NUM)
3805 etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(index));
3806 if (etqf & IXGBE_ETQF_FILTER_EN)
3807 return -EINVAL; /* filter index is in use. */
3810 etqf |= IXGBE_ETQF_FILTER_EN;
3811 etqf |= (uint32_t)filter->ethertype;
3813 if (filter->priority_en) {
3814 if (filter->priority > IXGBE_ETQF_MAX_PRI)
3816 etqf |= (uint32_t)((filter->priority << IXGBE_ETQF_SHIFT) & IXGBE_ETQF_UP);
3817 etqf |= IXGBE_ETQF_UP_EN;
3819 etqs |= (uint32_t)((rx_queue << IXGBE_ETQS_RX_QUEUE_SHIFT) & IXGBE_ETQS_RX_QUEUE);
3820 etqs |= IXGBE_ETQS_QUEUE_EN;
3822 IXGBE_WRITE_REG(hw, IXGBE_ETQF(index), etqf);
3823 IXGBE_WRITE_REG(hw, IXGBE_ETQS(index), etqs);
3828 * remove an ethertype filter
3831 * dev: Pointer to struct rte_eth_dev.
3832 * index: the index the filter allocates.
3835 * - On success, zero.
3836 * - On failure, a negative value.
3839 ixgbe_remove_ethertype_filter(struct rte_eth_dev *dev,
3842 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3844 if (hw->mac.type != ixgbe_mac_82599EB)
3847 if (index >= IXGBE_MAX_ETQF_FILTERS)
3850 IXGBE_WRITE_REG(hw, IXGBE_ETQF(index), 0);
3851 IXGBE_WRITE_REG(hw, IXGBE_ETQS(index), 0);
3857 * get an ethertype filter
3860 * dev: Pointer to struct rte_eth_dev.
3861 * index: the index the filter allocates.
3862 * filter: pointer to the filter that will be returned.
3863 * *rx_queue: pointer to the queue id the filter is assigned to.
3866 * - On success, zero.
3867 * - On failure, a negative value.
3870 ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
3871 uint16_t index, struct rte_ethertype_filter *filter,
3874 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3875 uint32_t etqf, etqs;
3877 if (hw->mac.type != ixgbe_mac_82599EB)
3880 if (index >= IXGBE_MAX_ETQF_FILTERS)
3883 etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(index));
3884 etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(index));
3885 if (etqf & IXGBE_ETQF_FILTER_EN) {
3886 filter->ethertype = etqf & IXGBE_ETQF_ETHERTYPE;
3887 filter->priority_en = (etqf & IXGBE_ETQF_UP_EN) ? 1 : 0;
3888 if (filter->priority_en)
3889 filter->priority = (etqf & IXGBE_ETQF_UP) >> 16;
3890 *rx_queue = (etqs & IXGBE_ETQS_RX_QUEUE) >> IXGBE_ETQS_RX_QUEUE_SHIFT;
3896 static inline enum ixgbe_5tuple_protocol
3897 convert_protocol_type(uint8_t protocol_value)
3899 if (protocol_value == IPPROTO_TCP)
3900 return IXGBE_FILTER_PROTOCOL_TCP;
3901 else if (protocol_value == IPPROTO_UDP)
3902 return IXGBE_FILTER_PROTOCOL_UDP;
3903 else if (protocol_value == IPPROTO_SCTP)
3904 return IXGBE_FILTER_PROTOCOL_SCTP;
3906 return IXGBE_FILTER_PROTOCOL_NONE;
3909 static inline uint8_t
3910 revert_protocol_type(enum ixgbe_5tuple_protocol protocol)
3912 if (protocol == IXGBE_FILTER_PROTOCOL_TCP)
3914 else if (protocol == IXGBE_FILTER_PROTOCOL_UDP)
3916 else if (protocol == IXGBE_FILTER_PROTOCOL_SCTP)
3917 return IPPROTO_SCTP;
3923 * add a 5tuple filter
3926 * dev: Pointer to struct rte_eth_dev.
3927 * index: the index the filter allocates.
3928 * filter: pointer to the filter that will be added.
3929 * rx_queue: the queue id the filter is assigned to.
3932 * - On success, zero.
3933 * - On failure, a negative value.
3936 ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
3937 struct rte_5tuple_filter *filter, uint16_t rx_queue)
3939 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3940 uint32_t ftqf, sdpqf = 0;
3941 uint32_t l34timir = 0;
3942 uint8_t mask = 0xff;
3944 if (hw->mac.type != ixgbe_mac_82599EB)
3947 if (index >= IXGBE_MAX_FTQF_FILTERS ||
3948 rx_queue >= IXGBE_MAX_RX_QUEUE_NUM ||
3949 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
3950 filter->priority < IXGBE_5TUPLE_MIN_PRI)
3951 return -EINVAL; /* filter index is out of range. */
3953 if (filter->tcp_flags) {
3954 PMD_INIT_LOG(INFO, "82599EB does not support tcp flags in the 5tuple filter");
3958 ftqf = IXGBE_READ_REG(hw, IXGBE_FTQF(index));
3959 if (ftqf & IXGBE_FTQF_QUEUE_ENABLE)
3960 return -EINVAL; /* filter index is in use. */
3963 sdpqf = (uint32_t)(filter->dst_port << IXGBE_SDPQF_DSTPORT_SHIFT);
3964 sdpqf = sdpqf | (filter->src_port & IXGBE_SDPQF_SRCPORT);
3966 ftqf |= (uint32_t)(convert_protocol_type(filter->protocol) &
3967 IXGBE_FTQF_PROTOCOL_MASK);
3968 ftqf |= (uint32_t)((filter->priority & IXGBE_FTQF_PRIORITY_MASK) <<
3969 IXGBE_FTQF_PRIORITY_SHIFT);
3970 if (filter->src_ip_mask == 0) /* 0 means compare. */
3971 mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
3972 if (filter->dst_ip_mask == 0)
3973 mask &= IXGBE_FTQF_DEST_ADDR_MASK;
3974 if (filter->src_port_mask == 0)
3975 mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
3976 if (filter->dst_port_mask == 0)
3977 mask &= IXGBE_FTQF_DEST_PORT_MASK;
3978 if (filter->protocol_mask == 0)
3979 mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
3980 ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
3981 ftqf |= IXGBE_FTQF_POOL_MASK_EN;
3982 ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
3984 IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), filter->dst_ip);
3985 IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), filter->src_ip);
3986 IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), sdpqf);
3987 IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), ftqf);
3989 l34timir |= IXGBE_L34T_IMIR_RESERVE;
3990 l34timir |= (uint32_t)(rx_queue << IXGBE_L34T_IMIR_QUEUE_SHIFT);
3991 IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), l34timir);
3996 * remove a 5tuple filter
3999 * dev: Pointer to struct rte_eth_dev.
4000 * index: the index the filter allocates.
4003 * - On success, zero.
4004 * - On failure, a negative value.
4007 ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
4010 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4012 if (hw->mac.type != ixgbe_mac_82599EB)
4015 if (index >= IXGBE_MAX_FTQF_FILTERS)
4016 return -EINVAL; /* filter index is out of range. */
4018 IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
4019 IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
4020 IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
4021 IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
4022 IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
4027 * get a 5tuple filter
4030 * dev: Pointer to struct rte_eth_dev.
4031 * index: the index the filter allocates
4032 * filter: pointer to the filter that is returned.
4033 * *rx_queue: pointer to the queue id the filter is assigned to.
4036 * - On success, zero.
4037 * - On failure, a negative value.
4040 ixgbe_get_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
4041 struct rte_5tuple_filter *filter, uint16_t *rx_queue)
4043 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4044 uint32_t sdpqf, ftqf, l34timir;
4046 enum ixgbe_5tuple_protocol proto;
4048 if (hw->mac.type != ixgbe_mac_82599EB)
4051 if (index >= IXGBE_MAX_FTQF_FILTERS)
4052 return -EINVAL; /* filter index is out of range. */
4054 ftqf = IXGBE_READ_REG(hw, IXGBE_FTQF(index));
4055 if (ftqf & IXGBE_FTQF_QUEUE_ENABLE) {
4056 proto = (enum ixgbe_5tuple_protocol)(ftqf & IXGBE_FTQF_PROTOCOL_MASK);
4057 filter->protocol = revert_protocol_type(proto);
4058 filter->priority = (ftqf >> IXGBE_FTQF_PRIORITY_SHIFT) &
4059 IXGBE_FTQF_PRIORITY_MASK;
4060 mask = (uint8_t)((ftqf >> IXGBE_FTQF_5TUPLE_MASK_SHIFT) &
4061 IXGBE_FTQF_5TUPLE_MASK_MASK);
4062 filter->src_ip_mask =
4063 (mask & IXGBE_FTQF_SOURCE_ADDR_MASK) ? 1 : 0;
4064 filter->dst_ip_mask =
4065 (mask & IXGBE_FTQF_DEST_ADDR_MASK) ? 1 : 0;
4066 filter->src_port_mask =
4067 (mask & IXGBE_FTQF_SOURCE_PORT_MASK) ? 1 : 0;
4068 filter->dst_port_mask =
4069 (mask & IXGBE_FTQF_DEST_PORT_MASK) ? 1 : 0;
4070 filter->protocol_mask =
4071 (mask & IXGBE_FTQF_PROTOCOL_COMP_MASK) ? 1 : 0;
4073 sdpqf = IXGBE_READ_REG(hw, IXGBE_SDPQF(index));
4074 filter->dst_port = (sdpqf & IXGBE_SDPQF_DSTPORT) >>
4075 IXGBE_SDPQF_DSTPORT_SHIFT;
4076 filter->src_port = sdpqf & IXGBE_SDPQF_SRCPORT;
4077 filter->dst_ip = IXGBE_READ_REG(hw, IXGBE_DAQF(index));
4078 filter->src_ip = IXGBE_READ_REG(hw, IXGBE_SAQF(index));
4080 l34timir = IXGBE_READ_REG(hw, IXGBE_L34T_IMIR(index));
4081 *rx_queue = (l34timir & IXGBE_L34T_IMIR_QUEUE) >>
4082 IXGBE_L34T_IMIR_QUEUE_SHIFT;
4089 ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
4091 struct ixgbe_hw *hw;
4092 uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
4094 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4096 if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
4099 /* Refuse an MTU that requires scattered packet support when that
4100 * feature has not already been enabled. */
4101 if (!dev->data->scattered_rx &&
4102 (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
4103 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
4107 * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
4108 * request of the version 2.0 of the mailbox API.
4109 * For now, use the IXGBE_VF_SET_LPE request of the version 1.0
4110 * of the mailbox API.
4111 * This call to the IXGBE_SET_LPE action won't work with ixgbe PF drivers
4112 * prior to 3.11.33, which contains the following change:
4113 * "ixgbe: Enable jumbo frames support w/ SR-IOV"
4115 ixgbevf_rlpml_set_vf(hw, max_frame);
4117 /* update max frame size */
4118 dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
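/*
 * Applications reach this callback through the generic
 * rte_eth_dev_set_mtu() API. A minimal usage sketch (assuming a
 * configured VF port with id 0):
 *
 *	if (rte_eth_dev_set_mtu(0, 9000) != 0)
 *		printf("MTU change rejected\n");
 */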
4122 static struct rte_driver rte_ixgbe_driver = {
4124 .init = rte_ixgbe_pmd_init,
4127 static struct rte_driver rte_ixgbevf_driver = {
4129 .init = rte_ixgbevf_pmd_init,
4132 PMD_REGISTER_DRIVER(rte_ixgbe_driver);
4133 PMD_REGISTER_DRIVER(rte_ixgbevf_driver);