/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>

#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "ixgbe_regs.h"
/*
 * High threshold controlling when to start sending XOFF frames. Must be at
 * least 8 bytes less than receive packet buffer size. This value is in units
 * of 1024 bytes.
 */
#define IXGBE_FC_HI    0x80

/*
 * Low threshold controlling when to start sending XON frames. This value is
 * in units of 1024 bytes.
 */
#define IXGBE_FC_LO    0x40

/* Default minimum inter-interrupt interval for EITR configuration */
#define IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT    0x79E

/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE 0x680
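
/*
 * A minimal sketch of the unit conversion described in the comments above:
 * the watermarks are programmed in units of 1024 bytes, so the defaults
 * correspond to a 128 KB XOFF threshold and a 64 KB XON threshold.
 * ixgbe_fc_threshold_to_bytes is an illustrative helper, not a driver API.
 */
static inline uint32_t
ixgbe_fc_threshold_to_bytes(uint32_t threshold_1kb_units)
{
	/* IXGBE_FC_HI (0x80) -> 131072 bytes, IXGBE_FC_LO (0x40) -> 65536 */
	return threshold_1kb_units * 1024;
}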
#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
#define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */

#define IXGBE_MMW_SIZE_DEFAULT        0x4
#define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
#define IXGBE_MAX_RING_DESC           4096 /* replicated define from ixgbe_rxtx.h */
/*
 * Default values for RX/TX configuration
 */
#define IXGBE_DEFAULT_RX_FREE_THRESH  32
#define IXGBE_DEFAULT_RX_PTHRESH      8
#define IXGBE_DEFAULT_RX_HTHRESH      8
#define IXGBE_DEFAULT_RX_WTHRESH      0

#define IXGBE_DEFAULT_TX_FREE_THRESH  32
#define IXGBE_DEFAULT_TX_PTHRESH      32
#define IXGBE_DEFAULT_TX_HTHRESH      0
#define IXGBE_DEFAULT_TX_WTHRESH      0
#define IXGBE_DEFAULT_TX_RSBIT_THRESH 32
/* Bit shift and mask */
#define IXGBE_4_BIT_WIDTH  (CHAR_BIT / 2)
#define IXGBE_4_BIT_MASK   RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
#define IXGBE_8_BIT_WIDTH  CHAR_BIT
#define IXGBE_8_BIT_MASK   UINT8_MAX

#define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */

#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))

#define IXGBE_HKEY_MAX_INDEX 10
/* Additional timesync values. */
#define NSEC_PER_SEC             1000000000L
#define IXGBE_INCVAL_10GB        0x66666666
#define IXGBE_INCVAL_1GB         0x40000000
#define IXGBE_INCVAL_100         0x50000000
#define IXGBE_INCVAL_SHIFT_10GB  28
#define IXGBE_INCVAL_SHIFT_1GB   24
#define IXGBE_INCVAL_SHIFT_100   21
#define IXGBE_INCVAL_SHIFT_82599 7
#define IXGBE_INCPER_SHIFT_82599 24

#define IXGBE_CYCLECOUNTER_MASK  0xffffffffffffffffULL
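
/*
 * A minimal sketch of how the INCVAL constants above relate to link speed
 * when the SYSTIME clock is armed; ixgbe_incval_for_link_speed is an
 * illustrative helper (the ETH_LINK_SPEED_* constants are assumed from this
 * DPDK era), not a driver API.
 */
static inline uint32_t
ixgbe_incval_for_link_speed(uint32_t link_speed)
{
	switch (link_speed) {
	case ETH_LINK_SPEED_100:
		return IXGBE_INCVAL_100;
	case ETH_LINK_SPEED_1000:
		return IXGBE_INCVAL_1GB;
	default: /* 10G and unknown speeds use the 10GbE increment */
		return IXGBE_INCVAL_10GB;
	}
}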
static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int  ixgbe_dev_configure(struct rte_eth_dev *dev);
static int  ixgbe_dev_start(struct rte_eth_dev *dev);
static void ixgbe_dev_stop(struct rte_eth_dev *dev);
static int  ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int  ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
static void ixgbe_dev_close(struct rte_eth_dev *dev);
static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *stats);
static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
				struct rte_eth_xstats *xstats, unsigned n);
static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
				struct rte_eth_xstats *xstats, unsigned n);
static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
					     uint16_t queue_id,
					     uint8_t stat_idx,
					     uint8_t is_rx);
static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
			       struct rte_eth_dev_info *dev_info);
static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
				 struct rte_eth_dev_info *dev_info);
static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
			       enum rte_vlan_type vlan_type,
			       uint16_t tpid);
static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
		uint16_t queue, bool on);
static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
		int on);
static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);
static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);
static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_pfc_conf *pfc_conf);
static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size);
static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
		void *param);
static void ixgbe_dev_interrupt_delayed_handler(void *param);
static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
		uint32_t index, uint32_t pool);
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
					struct ether_addr *mac_addr);
static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
static void ixgbevf_dev_close(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats);
static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
static int  ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
		uint16_t queue, int on);
static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
					    uint16_t queue_id);
static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
					     uint16_t queue_id);
static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
				 uint8_t queue, uint8_t msix_vector);
static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
/* For Eth VMDQ APIs support */
static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr, uint8_t on);
static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
		uint16_t rx_mask, uint8_t on);
static int ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
static int ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
		uint64_t pool_mask, uint8_t vlan_on);
static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
		struct rte_eth_mirror_conf *mirror_conf,
		uint8_t rule_id, uint8_t on);
static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
		uint8_t rule_id);
static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
					  uint16_t queue_id);
static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
					   uint16_t queue_id);
static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
			       uint8_t queue, uint8_t msix_vector);
static void ixgbe_configure_msix(struct rte_eth_dev *dev);

static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
		uint16_t queue_idx, uint16_t tx_rate);
static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
		uint16_t tx_rate, uint64_t q_msk);
static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
				 struct ether_addr *mac_addr,
				 uint32_t index, uint32_t pool);
static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
					 struct ether_addr *mac_addr);
static int ixgbe_syn_filter_set(struct rte_eth_dev *dev,
			struct rte_eth_syn_filter *filter,
			bool add);
static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
			struct rte_eth_syn_filter *filter);
static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
			struct ixgbe_5tuple_filter *filter);
static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
			struct ixgbe_5tuple_filter *filter);
static int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *filter,
			bool add);
static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *filter);
static int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter,
			bool add);
static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter);
static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
			enum rte_filter_type filter_type,
			enum rte_filter_op filter_op,
			void *arg);
static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
				      struct ether_addr *mc_addr_set,
				      uint32_t nb_mc_addr);
static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
				  struct rte_eth_dcb_info *dcb_info);

static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
static int ixgbe_get_regs(struct rte_eth_dev *dev,
			  struct rte_dev_reg_info *regs);
static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev);
static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
			    struct rte_dev_eeprom_info *eeprom);
static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
			    struct rte_dev_eeprom_info *eeprom);

static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
static int ixgbevf_get_regs(struct rte_eth_dev *dev,
			    struct rte_dev_reg_info *regs);

static int ixgbe_timesync_enable(struct rte_eth_dev *dev);
static int ixgbe_timesync_disable(struct rte_eth_dev *dev);
static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
					    struct timespec *timestamp,
					    uint32_t flags);
static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					    struct timespec *timestamp);
static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
				    struct timespec *timestamp);
static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
				     const struct timespec *timestamp);
/*
 * Define VF stats macros for registers that are not "cleared on read".
 */
#define UPDATE_VF_STAT(reg, last, cur)                           \
{                                                                \
	uint32_t latest = IXGBE_READ_REG(hw, reg);               \
	cur += (latest - last) & UINT_MAX;                       \
	last = latest;                                           \
}

#define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
{                                                                \
	u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
	u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
	u64 latest = ((new_msb << 32) | new_lsb);                \
	cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
	last = latest;                                           \
}
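
/*
 * A minimal worked example of the wrap handling in UPDATE_VF_STAT: unsigned
 * subtraction yields the correct delta even when the 32-bit register wraps,
 * e.g. last = 0xFFFFFFF0 and latest = 0x00000010 gives a delta of 0x20.
 * ixgbe_stat_delta32 is an illustrative helper, not part of the driver API.
 */
static inline uint64_t
ixgbe_stat_delta32(uint32_t last, uint32_t latest)
{
	return (uint64_t)((latest - last) & UINT_MAX);
}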
#define IXGBE_SET_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] |= 1 << bit;\
	} while (0)

#define IXGBE_CLEAR_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] &= ~(1 << bit);\
	} while (0)

#define IXGBE_GET_HWSTRIP(h, q, r) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(r) = (h)->bitmap[idx] >> bit & 1;\
	} while (0)
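
/*
 * A minimal usage sketch for the bitmap macros above: with 32-bit bitmap
 * words, queue 37 maps to word 1, bit 5 (37 / 32 == 1, 37 % 32 == 5).
 * ixgbe_hwstrip_is_set is an illustrative helper, not a driver API.
 */
static inline uint32_t
ixgbe_hwstrip_is_set(struct ixgbe_hwstrip *h, uint16_t q)
{
	uint32_t r;

	IXGBE_GET_HWSTRIP(h, q, r);
	return r;
}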
/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ixgbe_map[] = {

#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

	{ .vendor_id = 0, /* sentinel */ },
};

/*
 * The set of PCI devices this driver supports (for 82599 VF)
 */
static const struct rte_pci_id pci_id_ixgbevf_map[] = {

#define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
	{ .vendor_id = 0, /* sentinel */ },
};
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = IXGBE_MAX_RING_DESC,
	.nb_min = IXGBE_MIN_RING_DESC,
	.nb_align = IXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = IXGBE_MAX_RING_DESC,
	.nb_min = IXGBE_MIN_RING_DESC,
	.nb_align = IXGBE_TXD_ALIGN,
};
static const struct eth_dev_ops ixgbe_eth_dev_ops = {
	.dev_configure        = ixgbe_dev_configure,
	.dev_start            = ixgbe_dev_start,
	.dev_stop             = ixgbe_dev_stop,
	.dev_set_link_up      = ixgbe_dev_set_link_up,
	.dev_set_link_down    = ixgbe_dev_set_link_down,
	.dev_close            = ixgbe_dev_close,
	.promiscuous_enable   = ixgbe_dev_promiscuous_enable,
	.promiscuous_disable  = ixgbe_dev_promiscuous_disable,
	.allmulticast_enable  = ixgbe_dev_allmulticast_enable,
	.allmulticast_disable = ixgbe_dev_allmulticast_disable,
	.link_update          = ixgbe_dev_link_update,
	.stats_get            = ixgbe_dev_stats_get,
	.xstats_get           = ixgbe_dev_xstats_get,
	.stats_reset          = ixgbe_dev_stats_reset,
	.xstats_reset         = ixgbe_dev_xstats_reset,
	.queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
	.dev_infos_get        = ixgbe_dev_info_get,
	.mtu_set              = ixgbe_dev_mtu_set,
	.vlan_filter_set      = ixgbe_vlan_filter_set,
	.vlan_tpid_set        = ixgbe_vlan_tpid_set,
	.vlan_offload_set     = ixgbe_vlan_offload_set,
	.vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
	.rx_queue_start	      = ixgbe_dev_rx_queue_start,
	.rx_queue_stop        = ixgbe_dev_rx_queue_stop,
	.tx_queue_start	      = ixgbe_dev_tx_queue_start,
	.tx_queue_stop        = ixgbe_dev_tx_queue_stop,
	.rx_queue_setup       = ixgbe_dev_rx_queue_setup,
	.rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
	.rx_queue_release     = ixgbe_dev_rx_queue_release,
	.rx_queue_count       = ixgbe_dev_rx_queue_count,
	.rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
	.tx_queue_setup       = ixgbe_dev_tx_queue_setup,
	.tx_queue_release     = ixgbe_dev_tx_queue_release,
	.dev_led_on           = ixgbe_dev_led_on,
	.dev_led_off          = ixgbe_dev_led_off,
	.flow_ctrl_get        = ixgbe_flow_ctrl_get,
	.flow_ctrl_set        = ixgbe_flow_ctrl_set,
	.priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
	.mac_addr_add         = ixgbe_add_rar,
	.mac_addr_remove      = ixgbe_remove_rar,
	.mac_addr_set         = ixgbe_set_default_mac_addr,
	.uc_hash_table_set    = ixgbe_uc_hash_table_set,
	.uc_all_hash_table_set  = ixgbe_uc_all_hash_table_set,
	.mirror_rule_set      = ixgbe_mirror_rule_set,
	.mirror_rule_reset    = ixgbe_mirror_rule_reset,
	.set_vf_rx_mode       = ixgbe_set_pool_rx_mode,
	.set_vf_rx            = ixgbe_set_pool_rx,
	.set_vf_tx            = ixgbe_set_pool_tx,
	.set_vf_vlan_filter   = ixgbe_set_pool_vlan_filter,
	.set_queue_rate_limit = ixgbe_set_queue_rate_limit,
	.set_vf_rate_limit    = ixgbe_set_vf_rate_limit,
	.reta_update          = ixgbe_dev_rss_reta_update,
	.reta_query           = ixgbe_dev_rss_reta_query,
#ifdef RTE_NIC_BYPASS
	.bypass_init          = ixgbe_bypass_init,
	.bypass_state_set     = ixgbe_bypass_state_store,
	.bypass_state_show    = ixgbe_bypass_state_show,
	.bypass_event_set     = ixgbe_bypass_event_store,
	.bypass_event_show    = ixgbe_bypass_event_show,
	.bypass_wd_timeout_set  = ixgbe_bypass_wd_timeout_store,
	.bypass_wd_timeout_show = ixgbe_bypass_wd_timeout_show,
	.bypass_ver_show      = ixgbe_bypass_ver_show,
	.bypass_wd_reset      = ixgbe_bypass_wd_reset,
#endif /* RTE_NIC_BYPASS */
	.rss_hash_update      = ixgbe_dev_rss_hash_update,
	.rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
	.filter_ctrl          = ixgbe_dev_filter_ctrl,
	.set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
	.rxq_info_get         = ixgbe_rxq_info_get,
	.txq_info_get         = ixgbe_txq_info_get,
	.timesync_enable      = ixgbe_timesync_enable,
	.timesync_disable     = ixgbe_timesync_disable,
	.timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
	.get_reg_length       = ixgbe_get_reg_length,
	.get_reg              = ixgbe_get_regs,
	.get_eeprom_length    = ixgbe_get_eeprom_length,
	.get_eeprom           = ixgbe_get_eeprom,
	.set_eeprom           = ixgbe_set_eeprom,
	.get_dcb_info         = ixgbe_dev_get_dcb_info,
	.timesync_adjust_time = ixgbe_timesync_adjust_time,
	.timesync_read_time   = ixgbe_timesync_read_time,
	.timesync_write_time  = ixgbe_timesync_write_time,
};
/*
 * dev_ops for virtual function: only the bare necessities for basic VF
 * operation are implemented.
 */
static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
	.dev_configure        = ixgbevf_dev_configure,
	.dev_start            = ixgbevf_dev_start,
	.dev_stop             = ixgbevf_dev_stop,
	.link_update          = ixgbe_dev_link_update,
	.stats_get            = ixgbevf_dev_stats_get,
	.xstats_get           = ixgbevf_dev_xstats_get,
	.stats_reset          = ixgbevf_dev_stats_reset,
	.xstats_reset         = ixgbevf_dev_stats_reset,
	.dev_close            = ixgbevf_dev_close,
	.dev_infos_get        = ixgbevf_dev_info_get,
	.mtu_set              = ixgbevf_dev_set_mtu,
	.vlan_filter_set      = ixgbevf_vlan_filter_set,
	.vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
	.vlan_offload_set     = ixgbevf_vlan_offload_set,
	.rx_queue_setup       = ixgbe_dev_rx_queue_setup,
	.rx_queue_release     = ixgbe_dev_rx_queue_release,
	.rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
	.tx_queue_setup       = ixgbe_dev_tx_queue_setup,
	.tx_queue_release     = ixgbe_dev_tx_queue_release,
	.rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
	.mac_addr_add         = ixgbevf_add_mac_addr,
	.mac_addr_remove      = ixgbevf_remove_mac_addr,
	.set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
	.rxq_info_get         = ixgbe_rxq_info_get,
	.txq_info_get         = ixgbe_txq_info_get,
	.mac_addr_set         = ixgbevf_set_default_mac_addr,
	.get_reg_length       = ixgbevf_get_reg_length,
	.get_reg              = ixgbevf_get_regs,
	.reta_update          = ixgbe_dev_rss_reta_update,
	.reta_query           = ixgbe_dev_rss_reta_query,
	.rss_hash_update      = ixgbe_dev_rss_hash_update,
	.rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
};
/* store statistics names and their offsets in the stats structure */
struct rte_ixgbe_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
	{"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
	{"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)},
	{"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)},
	{"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)},
	{"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)},
	{"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)},
	{"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
	{"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
	{"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
	{"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
	{"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)},
	{"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)},
	{"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)},
	{"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)},
	{"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
		prc1023)},
	{"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
		prc1522)},
	{"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
	{"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
	{"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)},
	{"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)},
	{"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)},
	{"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)},
	{"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)},
	{"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
	{"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)},
	{"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)},
	{"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)},
	{"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)},
	{"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)},
	{"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)},
	{"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)},
	{"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)},
	{"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
		ptc1023)},
	{"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
		ptc1522)},
	{"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)},
	{"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)},
	{"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)},
	{"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},

	{"flow_director_added_filters", offsetof(struct ixgbe_hw_stats,
		fdirustat_add)},
	{"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats,
		fdirustat_remove)},
	{"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats,
		fdirfstat_fadd)},
	{"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats,
		fdirfstat_fremove)},
	{"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats,
		fdirmatch)},
	{"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats,
		fdirmiss)},

	{"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)},
	{"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
	{"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats,
		fclast)},
	{"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)},
	{"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)},
	{"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)},
	{"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)},
	{"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats,
		fcoe_noddp)},
	{"rx_fcoe_no_direct_data_placement_ext_buff",
		offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)},

	{"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
		lxontxc)},
	{"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
		lxonrxc)},
	{"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
		lxofftxc)},
	{"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
		lxoffrxc)},
	{"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)},
};

#define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
			   sizeof(rte_ixgbe_stats_strings[0]))

/* Per-queue statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
	{"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
	{"dropped", offsetof(struct ixgbe_hw_stats, mpc)},
	{"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)},
	{"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)},
};

#define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
				 sizeof(rte_ixgbe_rxq_strings[0]))

static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
	{"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
	{"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)},
	{"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats,
		pxon2offc)},
};

#define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
				 sizeof(rte_ixgbe_txq_strings[0]))

static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
	{"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
};

#define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \
			   sizeof(rte_ixgbevf_stats_strings[0]))
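
/*
 * A minimal sketch of how the name/offset tables above are consumed: each
 * xstats getter reads a 64-bit counter from the stats struct at the recorded
 * offset. ixgbe_read_stat_by_offset is an illustrative helper, not the
 * driver's actual accessor.
 */
static inline uint64_t
ixgbe_read_stat_by_offset(const struct ixgbe_hw_stats *stats, unsigned offset)
{
	return *(const uint64_t *)(((const char *)stats) + offset);
}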
/**
 * Atomically reads the link status information from global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to read from.
 *   - Pointer to the buffer to be saved with the link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				      struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
/**
 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to write to.
 *   - Pointer to the buffer holding the link status to be written.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				       struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
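
/*
 * A minimal usage sketch for the atomic accessors above: callers snapshot
 * the link state into a local struct and then inspect its fields.
 * ixgbe_dev_link_is_up is an illustrative helper, not a driver API.
 */
static inline int
ixgbe_dev_link_is_up(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;

	rte_ixgbe_dev_atomic_read_link_status(dev, &link);
	return link.link_status;
}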
/*
 * This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
 */
static inline int
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
	switch (hw->phy.type) {
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
		return 1;
	default:
		return 0;
	}
}
static inline int32_t
ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = ixgbe_reset_hw(hw);

	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	return status;
}
static inline void
ixgbe_enable_intr(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
	IXGBE_WRITE_FLUSH(hw);
}
/*
 * This function is based on ixgbe_disable_intr() in base/ixgbe.h.
 */
static void
ixgbe_disable_intr(struct ixgbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	if (hw->mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
	}
	IXGBE_WRITE_FLUSH(hw);
}
/*
 * This function resets queue statistics mapping registers.
 * From Niantic datasheet, Initialization of Statistics section:
 * "...if software requires the queue counters, the RQSMR and TQSM registers
 * must be re-programmed following a device reset."
 */
static void
ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
{
	uint32_t i;

	for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
	}
}
static int
ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
				  uint16_t queue_id,
				  uint8_t stat_idx,
				  uint8_t is_rx)
{
#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
#define NB_QMAP_FIELDS_PER_QSM_REG 4
#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f

	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_stat_mapping_registers *stat_mappings =
		IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
	uint32_t qsmr_mask = 0;
	uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
	uint32_t q_map;
	uint8_t n, offset;

	if ((hw->mac.type != ixgbe_mac_82599EB) &&
	    (hw->mac.type != ixgbe_mac_X540) &&
	    (hw->mac.type != ixgbe_mac_X550) &&
	    (hw->mac.type != ixgbe_mac_X550EM_x))
		return -ENOSYS;

	PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);

	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
	if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
		return -EIO;
	}
	offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);

	/* Now clear any previous stat_idx set */
	clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] &= ~clearing_mask;
	else
		stat_mappings->rqsmr[n] &= ~clearing_mask;

	q_map = (uint32_t)stat_idx;
	q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
	qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] |= qsmr_mask;
	else
		stat_mappings->rqsmr[n] |= qsmr_mask;

	PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);
	PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
		     is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);

	/* Now write the mapping in the appropriate register */
	if (is_rx) {
		PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
			     stat_mappings->rqsmr[n], n);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
	} else {
		PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
			     stat_mappings->tqsm[n], n);
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
	}

	return 0;
}
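
/*
 * A minimal sketch of the field arithmetic used above: each 32-bit
 * RQSMR/TQSM register packs four 8-bit queue-to-stat-index fields, so
 * queue 10 lands in register 2, field 2. ixgbe_qsm_locate is an
 * illustrative helper, not a driver API.
 */
static inline void
ixgbe_qsm_locate(uint16_t queue_id, uint8_t *reg_idx, uint8_t *field)
{
	*reg_idx = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
	*field = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
}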
static void
ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
{
	struct ixgbe_stat_mapping_registers *stat_mappings =
		IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int i;

	/* write whatever was in stat mapping table to the NIC */
	for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
	}
}
static void
ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
{
	uint8_t i;
	struct ixgbe_dcb_tc_config *tc;
	uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;

	dcb_config->num_tcs.pg_tcs = dcb_max_tc;
	dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
	for (i = 0; i < dcb_max_tc; i++) {
		tc = &dcb_config->tc_config[i];
		tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
		tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
			(uint8_t)(100 / dcb_max_tc + (i & 1));
		tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
		tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
			(uint8_t)(100 / dcb_max_tc + (i & 1));
		tc->pfc = ixgbe_dcb_pfc_disabled;
	}

	/* Initialize default user to priority mapping, UPx->TC0 */
	tc = &dcb_config->tc_config[0];
	tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
	tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
	for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
		dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
		dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
	}
	dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
	dcb_config->pfc_mode_enable = false;
	dcb_config->vt_mode = true;
	dcb_config->round_robin_enable = false;
	/* support all DCB capabilities in 82599 */
	dcb_config->support.capabilities = 0xFF;

	/* we only support 4 TCs for X540 and X550 */
	if (hw->mac.type == ixgbe_mac_X540 ||
	    hw->mac.type == ixgbe_mac_X550 ||
	    hw->mac.type == ixgbe_mac_X550EM_x) {
		dcb_config->num_tcs.pg_tcs = 4;
		dcb_config->num_tcs.pfc_tcs = 4;
	}
}
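
/*
 * A minimal worked example of the default bandwidth split above: with the
 * 8-TC default, 100/8 + (i & 1) assigns 12% to even TCs and 13% to odd TCs,
 * which sums to exactly 100 (4*12 + 4*13). ixgbe_dcb_default_bw_percent is
 * an illustrative helper, not a driver API.
 */
static inline uint8_t
ixgbe_dcb_default_bw_percent(uint8_t tc_idx, uint8_t num_tcs)
{
	return (uint8_t)(100 / num_tcs + (tc_idx & 1));
}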
/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
{
	uint16_t mask;

	/*
	 * Phy lock should not fail in this early stage. If this is the case,
	 * it is due to an improper exit of the application.
	 * So force the release of the faulty lock. Release of common lock
	 * is done automatically by swfw_sync function.
	 */
	mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
	if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
		PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
	}
	ixgbe_release_swfw_semaphore(hw, mask);

	/*
	 * These ones are more tricky since they are common to all ports; but
	 * swfw_sync retries last long enough (1s) to be almost sure that if
	 * lock can not be taken it is due to an improper lock of the
	 * semaphore.
	 */
	mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
	if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");
	}
	ixgbe_release_swfw_semaphore(hw, mask);
}
/*
 * This function is based on code in ixgbe_attach() in base/ixgbe.c.
 * It returns 0 on success.
 */
static int
eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_vfta *shadow_vfta =
		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
	struct ixgbe_hwstrip *hwstrip =
		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
	struct ixgbe_dcb_config *dcb_config =
		IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	uint32_t ctrl_ext;
	uint16_t csum;
	int diag, i;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &ixgbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX and TX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct ixgbe_tx_queue *txq;
		/* The TX queue function in the primary process is set by the
		 * last queue initialized; the Tx queue may not have been
		 * initialized by the primary process yet.
		 */
		if (eth_dev->data->tx_queues) {
			txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
			ixgbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default TX function if we get here */
			PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
				     "Using default TX function.");
		}

		ixgbe_set_rx_function(eth_dev);

		return 0;
	}
	pci_dev = eth_dev->pci_dev;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->allow_unsupported_sfp = 1;

	/* Initialize the shared code (base driver) */
#ifdef RTE_NIC_BYPASS
	diag = ixgbe_bypass_init_shared_code(hw);
#else
	diag = ixgbe_init_shared_code(hw);
#endif /* RTE_NIC_BYPASS */

	if (diag != IXGBE_SUCCESS) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
		return -EIO;
	}

	/* pick up the PCI bus settings for reporting later */
	ixgbe_get_bus_info(hw);

	/* Unlock any pending hardware semaphore */
	ixgbe_swfw_lock_reset(hw);

	/* Initialize DCB configuration*/
	memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
	ixgbe_dcb_init(hw, dcb_config);
	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.current_mode = ixgbe_fc_full;
	hw->fc.pause_time = IXGBE_FC_PAUSE;
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		hw->fc.low_water[i] = IXGBE_FC_LO;
		hw->fc.high_water[i] = IXGBE_FC_HI;
	}
	hw->fc.send_xon = 1;

	/* Make sure we have a good EEPROM before we read from it */
	diag = ixgbe_validate_eeprom_checksum(hw, &csum);
	if (diag != IXGBE_SUCCESS) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
		return -EIO;
	}

#ifdef RTE_NIC_BYPASS
	diag = ixgbe_bypass_init_hw(hw);
#else
	diag = ixgbe_init_hw(hw);
#endif /* RTE_NIC_BYPASS */

	/*
	 * Devices with copper phys will fail to initialise if ixgbe_init_hw()
	 * is called too soon after the kernel driver unbinding/binding occurs.
	 * The failure occurs in ixgbe_identify_phy_generic() for all devices,
	 * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
	 * also called. See ixgbe_identify_phy_82599(). The reason for the
	 * failure is not known, and only occurs when virtualisation features
	 * are disabled in the bios. A delay of 100ms was found to be enough by
	 * trial-and-error, and is doubled to be safe.
	 */
	if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
		rte_delay_ms(200);
		diag = ixgbe_init_hw(hw);
	}

	if (diag == IXGBE_ERR_EEPROM_VERSION) {
		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
			     "LOM. Please be aware there may be issues associated "
			     "with your hardware.");
		PMD_INIT_LOG(ERR, "If you are experiencing problems "
			     "please contact your Intel or hardware representative "
			     "who provided you with this hardware.");
	} else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
	if (diag) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
		return -EIO;
	}

	/* Reset the hw statistics */
	ixgbe_dev_stats_reset(eth_dev);

	/* disable interrupt */
	ixgbe_disable_intr(hw);

	/* reset mappings for queue statistics hw counters*/
	ixgbe_reset_qstat_mappings(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store "
			     "MAC addresses",
			     ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}
	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
						    IXGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
		return -ENOMEM;
	}

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap*/
	memset(hwstrip, 0, sizeof(*hwstrip));

	/* initialize PF if max_vfs not zero */
	ixgbe_pf_host_init(eth_dev);

	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	/* let hardware know driver is loaded */
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
			     (int) hw->mac.type, (int) hw->phy.type,
			     (int) hw->phy.sfp_type);
	else
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			     (int) hw->mac.type, (int) hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(&pci_dev->intr_handle,
				   ixgbe_dev_interrupt_handler,
				   (void *)eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pci_dev->intr_handle);

	/* enable support intr */
	ixgbe_enable_intr(eth_dev);

	/* initialize 5tuple filter list */
	TAILQ_INIT(&filter_info->fivetuple_list);
	memset(filter_info->fivetuple_mask, 0,
	       sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);

	return 0;
}
static int
eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct ixgbe_hw *hw;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	pci_dev = eth_dev->pci_dev;

	if (hw->adapter_stopped == 0)
		ixgbe_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* Unlock any pending hardware semaphore */
	ixgbe_swfw_lock_reset(hw);

	/* disable uio intr before callback unregister */
	rte_intr_disable(&(pci_dev->intr_handle));
	rte_intr_callback_unregister(&(pci_dev->intr_handle),
				     ixgbe_dev_interrupt_handler, (void *)eth_dev);

	/* uninitialize PF if max_vfs not zero */
	ixgbe_pf_host_uninit(eth_dev);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	rte_free(eth_dev->data->hash_mac_addrs);
	eth_dev->data->hash_mac_addrs = NULL;

	return 0;
}
/*
 * Negotiate mailbox API version with the PF.
 * After reset, the API version is always set to the basic one (ixgbe_mbox_api_10).
 * Then we try to negotiate, starting with the most recent one.
 * If all negotiation attempts fail, then we will proceed with
 * the default one (ixgbe_mbox_api_10).
 */
static void
ixgbevf_negotiate_api(struct ixgbe_hw *hw)
{
	int32_t i;

	/* start with highest supported, proceed down */
	static const enum ixgbe_pfvf_api_rev sup_ver[] = {
		ixgbe_mbox_api_12,
		ixgbe_mbox_api_11,
		ixgbe_mbox_api_10,
	};

	for (i = 0;
			i != RTE_DIM(sup_ver) &&
			ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
			i++)
		;
}
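
/*
 * A minimal equivalent of the condensed loop above, written with an explicit
 * body; both stop at the first version the PF accepts.
 * ixgbevf_negotiate_api_expanded is an illustrative rewrite, not a driver
 * function.
 */
static inline void
ixgbevf_negotiate_api_expanded(struct ixgbe_hw *hw)
{
	static const enum ixgbe_pfvf_api_rev try_ver[] = {
		ixgbe_mbox_api_12,
		ixgbe_mbox_api_11,
		ixgbe_mbox_api_10,
	};
	uint32_t i;

	for (i = 0; i < RTE_DIM(try_ver); i++)
		if (ixgbevf_negotiate_api_version(hw, try_ver[i]) == 0)
			break; /* PF accepted this version */
}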
static void
generate_random_mac_addr(struct ether_addr *mac_addr)
{
	uint64_t random;

	/* Set Organizationally Unique Identifier (OUI) prefix. */
	mac_addr->addr_bytes[0] = 0x00;
	mac_addr->addr_bytes[1] = 0x09;
	mac_addr->addr_bytes[2] = 0xC0;
	/* Force indication of locally assigned MAC address. */
	mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR;
	/* Generate the last 3 bytes of the MAC address with a random number. */
	random = rte_rand();
	memcpy(&mac_addr->addr_bytes[3], &random, 3);
}
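
/*
 * A minimal check matching the address layout produced above: the first byte
 * carries the locally-administered bit (ETHER_LOCAL_ADMIN_ADDR, 0x02), so
 * generated addresses start 02:09:c0. ixgbe_mac_is_locally_administered is
 * an illustrative helper, not a driver API.
 */
static inline int
ixgbe_mac_is_locally_administered(const struct ether_addr *ea)
{
	return (ea->addr_bytes[0] & ETHER_LOCAL_ADMIN_ADDR) != 0;
}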
/*
 * Virtual Function device init
 */
static int
eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
{
	int diag;
	uint32_t tc, tcs;
	struct rte_pci_device *pci_dev;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_vfta *shadow_vfta =
		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
	struct ixgbe_hwstrip *hwstrip =
		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
	struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
		return 0;
	}

	pci_dev = eth_dev->pci_dev;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap*/
	memset(hwstrip, 0, sizeof(*hwstrip));

	/* Initialize the shared code (base driver) */
	diag = ixgbe_init_shared_code(hw);
	if (diag != IXGBE_SUCCESS) {
		PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
		return -EIO;
	}

	/* init_mailbox_params */
	hw->mbx.ops.init_params(hw);

	/* Reset the hw statistics */
	ixgbevf_dev_stats_reset(eth_dev);

	/* Disable the interrupts for VF */
	ixgbevf_intr_disable(hw);

	hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
	diag = hw->mac.ops.reset_hw(hw);

	/*
	 * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when
	 * the underlying PF driver has not assigned a MAC address to the VF.
	 * In this case, assign a random MAC address.
	 */
	if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
		PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
		return diag;
	}

	/* negotiate mailbox API version to use with the PF. */
	ixgbevf_negotiate_api(hw);

	/* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
	ixgbevf_get_queues(hw, &tcs, &tc);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store "
			     "MAC addresses",
			     ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Generate a random MAC address, if none was assigned by PF. */
	if (is_zero_ether_addr(perm_addr)) {
		generate_random_mac_addr(perm_addr);
		diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
		if (diag) {
			rte_free(eth_dev->data->mac_addrs);
			eth_dev->data->mac_addrs = NULL;
			return diag;
		}
		PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
		PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
			     "%02x:%02x:%02x:%02x:%02x:%02x",
			     perm_addr->addr_bytes[0],
			     perm_addr->addr_bytes[1],
			     perm_addr->addr_bytes[2],
			     perm_addr->addr_bytes[3],
			     perm_addr->addr_bytes[4],
			     perm_addr->addr_bytes[5]);
	}

	/* Copy the permanent MAC address */
	ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);

	/* reset the hardware with the new settings */
	diag = hw->mac.ops.start_hw(hw);
	switch (diag) {
	case 0:
		break;

	default:
		PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
		return -EIO;
	}

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id, "ixgbe_mac_82599_vf");

	return 0;
}
/* Virtual Function device uninit */
static int
eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_hw *hw;
	unsigned i;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	if (hw->adapter_stopped == 0)
		ixgbevf_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* Disable the interrupts for VF */
	ixgbevf_intr_disable(hw);

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		ixgbe_dev_rx_queue_release(eth_dev->data->rx_queues[i]);
		eth_dev->data->rx_queues[i] = NULL;
	}
	eth_dev->data->nb_rx_queues = 0;

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		ixgbe_dev_tx_queue_release(eth_dev->data->tx_queues[i]);
		eth_dev->data->tx_queues[i] = NULL;
	}
	eth_dev->data->nb_tx_queues = 0;

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	return 0;
}
static struct eth_driver rte_ixgbe_pmd = {
	.pci_drv = {
		.name = "rte_ixgbe_pmd",
		.id_table = pci_id_ixgbe_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
			RTE_PCI_DRV_DETACHABLE,
	},
	.eth_dev_init = eth_ixgbe_dev_init,
	.eth_dev_uninit = eth_ixgbe_dev_uninit,
	.dev_private_size = sizeof(struct ixgbe_adapter),
};

/*
 * virtual function driver struct
 */
static struct eth_driver rte_ixgbevf_pmd = {
	.pci_drv = {
		.name = "rte_ixgbevf_pmd",
		.id_table = pci_id_ixgbevf_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
	},
	.eth_dev_init = eth_ixgbevf_dev_init,
	.eth_dev_uninit = eth_ixgbevf_dev_uninit,
	.dev_private_size = sizeof(struct ixgbe_adapter),
};
/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI IXGBE devices.
 */
static int
rte_ixgbe_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	rte_eth_driver_register(&rte_ixgbe_pmd);
	return 0;
}

/*
 * VF Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Virtual Poll Mode] Driver of PCI niantic devices.
 */
static int
rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	rte_eth_driver_register(&rte_ixgbevf_pmd);
	return 0;
}
static int
ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vfta *shadow_vfta =
		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}
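
/*
 * A minimal worked example of the VFTA indexing above: the 4096-entry VLAN
 * table is stored as 128 32-bit words, so VLAN 100 maps to word 3, bit 4
 * (100 >> 5 == 3, 100 & 0x1F == 4). ixgbe_vfta_locate is an illustrative
 * helper, not a driver API.
 */
static inline void
ixgbe_vfta_locate(uint16_t vlan_id, uint32_t *word, uint32_t *bit_mask)
{
	*word = (uint32_t)((vlan_id >> 5) & 0x7F);
	*bit_mask = (uint32_t)(1 << (vlan_id & 0x1F));
}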
static void
ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	if (on)
		ixgbe_vlan_hw_strip_enable(dev, queue);
	else
		ixgbe_vlan_hw_strip_disable(dev, queue);
}
static int
ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
		    enum rte_vlan_type vlan_type,
		    uint16_t tpid)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret = 0;

	switch (vlan_type) {
	case ETH_VLAN_TYPE_INNER:
		/* Only the high 16 bits are valid */
		IXGBE_WRITE_REG(hw, IXGBE_EXVET, tpid << 16);
		break;
	default:
		ret = -EINVAL;
		PMD_DRV_LOG(ERR, "Unsupported vlan type %d\n", vlan_type);
		break;
	}

	return ret;
}
static void
ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t vlnctrl;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Disable */
	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	vlnctrl &= ~IXGBE_VLNCTRL_VFE;

	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
}
static void
ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vfta *shadow_vfta =
		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t vlnctrl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Enable */
	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
	vlnctrl |= IXGBE_VLNCTRL_VFE;

	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);

	/* write whatever is in local vfta copy */
	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
}
static void
ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
	struct ixgbe_hwstrip *hwstrip =
		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);

	if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
		return;

	if (on)
		IXGBE_SET_HWSTRIP(hwstrip, queue);
	else
		IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
}
static void
ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* No queue-level strip support on 82598EB */
		PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
		return;
	}

	/* On other 10G NICs, VLAN strip can be set up per queue in RXDCTL */
	ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
	ctrl &= ~IXGBE_RXDCTL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);

	/* record this setting for HW strip per queue */
	ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}
static void
ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* No queue-level strip support on 82598EB */
		PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
		return;
	}

	/* On other 10G NICs, VLAN strip can be set up per queue in RXDCTL */
	ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
	ctrl |= IXGBE_RXDCTL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);

	/* record this setting for HW strip per queue */
	ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}
static void
ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ctrl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	if (hw->mac.type == ixgbe_mac_82598EB) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
		ctrl &= ~IXGBE_VLNCTRL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
	} else {
		/* On other 10G NICs, VLAN strip can be set up per queue in RXDCTL */
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
			ctrl &= ~IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);

			/* record this setting for HW strip per queue */
			ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
		}
	}
}
static void
ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ctrl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	if (hw->mac.type == ixgbe_mac_82598EB) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
		ctrl |= IXGBE_VLNCTRL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
	} else {
		/* On other 10G NICs, VLAN strip can be set up per queue in RXDCTL */
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
			ctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);

			/* record this setting for HW strip per queue */
			ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
		}
	}
}
static void
ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	/* DMATXCTRL: Generic Double VLAN Disable */
	ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
	ctrl &= ~IXGBE_DMATXCTL_GDV;
	IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);

	/* CTRL_EXT: Global Double VLAN Disable */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl &= ~IXGBE_EXTENDED_VLAN;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
}
static void
ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	/* DMATXCTRL: Generic Double VLAN Enable */
	ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
	ctrl |= IXGBE_DMATXCTL_GDV;
	IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);

	/* CTRL_EXT: Global Double VLAN Enable */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl |= IXGBE_EXTENDED_VLAN;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);

	/*
	 * The VET EXT field in the EXVET register is 0x8100 by default, so it
	 * does not need to be changed. The same holds for the VT field of the
	 * DMATXCTL register.
	 */
}
1750 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1752 if(mask & ETH_VLAN_STRIP_MASK){
1753 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1754 ixgbe_vlan_hw_strip_enable_all(dev);
1756 ixgbe_vlan_hw_strip_disable_all(dev);
1759 if (mask & ETH_VLAN_FILTER_MASK) {
1760 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1761 ixgbe_vlan_hw_filter_enable(dev);
1763 ixgbe_vlan_hw_filter_disable(dev);
1766 if (mask & ETH_VLAN_EXTEND_MASK) {
1767 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1768 ixgbe_vlan_hw_extend_enable(dev);
1770 ixgbe_vlan_hw_extend_disable(dev);
1775 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1777 struct ixgbe_hw *hw =
1778 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1779 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
1780 uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1781 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
1782 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
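/*
 * Note (assumption about the checks elided from this listing): with RSS
 * under SR-IOV, each VF pool may own only 1, 2 or 4 Rx queues, and the
 * number of active pools shrinks as the per-pool queue count grows
 * (e.g. 64 pools of up to 2 queues, or 32 pools of 4 queues).
 * def_pool_q_idx points at the first queue past all VF queues, i.e. the
 * start of the PF default pool.
 */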
1786 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
1791 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
1794 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
1800 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
1801 RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = dev->pci_dev->max_vfs * nb_rx_q;
1807 ixgbe_check_mq_mode(struct rte_eth_dev *dev)
1809 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1810 uint16_t nb_rx_q = dev->data->nb_rx_queues;
1811 uint16_t nb_tx_q = dev->data->nb_tx_queues;
1813 if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
1814 /* check multi-queue mode */
1815 switch (dev_conf->rxmode.mq_mode) {
1816 case ETH_MQ_RX_VMDQ_DCB:
1817 case ETH_MQ_RX_VMDQ_DCB_RSS:
1818 /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
1819 PMD_INIT_LOG(ERR, "SRIOV active,"
1820 " unsupported mq_mode rx %d.",
1821 dev_conf->rxmode.mq_mode);
1824 case ETH_MQ_RX_VMDQ_RSS:
1825 dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
1826 if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
1827 if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
1828 PMD_INIT_LOG(ERR, "SRIOV is active,"
1829 " invalid queue number"
1830 " for VMDQ RSS, allowed"
1831 " value are 1, 2 or 4.");
1835 case ETH_MQ_RX_VMDQ_ONLY:
1836 case ETH_MQ_RX_NONE:
1837 /* if no mq mode is configured, use the default scheme */
1838 dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
1839 if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
1840 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
1842 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
1843 /* SRIOV only works with VMDq enabled */
1844 PMD_INIT_LOG(ERR, "SRIOV is active,"
1845 " wrong mq_mode rx %d.",
1846 dev_conf->rxmode.mq_mode);
1850 switch (dev_conf->txmode.mq_mode) {
1851 case ETH_MQ_TX_VMDQ_DCB:
1852 /* DCB VMDQ in SRIOV mode, not implemented yet */
1853 PMD_INIT_LOG(ERR, "SRIOV is active,"
1854 " unsupported VMDQ mq_mode tx %d.",
1855 dev_conf->txmode.mq_mode);
1857 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
1858 dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
1862 /* check valid queue number */
1863 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
1864 (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
1865 PMD_INIT_LOG(ERR, "SRIOV is active,"
1866 " queue number must less equal to %d.",
1867 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
1871 /* check configuration for VMDq+DCB mode */
1872 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
1873 const struct rte_eth_vmdq_dcb_conf *conf;
1875 if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
1876 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
1877 IXGBE_VMDQ_DCB_NB_QUEUES);
1880 conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
1881 if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1882 conf->nb_queue_pools == ETH_32_POOLS)) {
1883 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1884 " nb_queue_pools must be %d or %d.",
1885 ETH_16_POOLS, ETH_32_POOLS);
1889 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
1890 const struct rte_eth_vmdq_dcb_tx_conf *conf;
1892 if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
1893 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
1894 IXGBE_VMDQ_DCB_NB_QUEUES);
1897 conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1898 if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1899 conf->nb_queue_pools == ETH_32_POOLS)) {
1900 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1901 " nb_queue_pools != %d and"
1902 " nb_queue_pools != %d.",
1903 ETH_16_POOLS, ETH_32_POOLS);
1908 /* For DCB mode check our configuration before we go further */
1909 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
1910 const struct rte_eth_dcb_rx_conf *conf;
1912 if (nb_rx_q != IXGBE_DCB_NB_QUEUES) {
1913 PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.",
1914 IXGBE_DCB_NB_QUEUES);
1917 conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
1918 if (!(conf->nb_tcs == ETH_4_TCS ||
1919 conf->nb_tcs == ETH_8_TCS)) {
1920 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1921 " and nb_tcs != %d.",
1922 ETH_4_TCS, ETH_8_TCS);
1927 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
1928 const struct rte_eth_dcb_tx_conf *conf;
1930 if (nb_tx_q != IXGBE_DCB_NB_QUEUES) {
1931 PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.",
1932 IXGBE_DCB_NB_QUEUES);
1935 conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
1936 if (!(conf->nb_tcs == ETH_4_TCS ||
1937 conf->nb_tcs == ETH_8_TCS)) {
1938 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1939 " and nb_tcs != %d.",
1940 ETH_4_TCS, ETH_8_TCS);
1949 ixgbe_dev_configure(struct rte_eth_dev *dev)
1951 struct ixgbe_interrupt *intr =
1952 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1953 struct ixgbe_adapter *adapter =
1954 (struct ixgbe_adapter *)dev->data->dev_private;
1957 PMD_INIT_FUNC_TRACE();
1958 /* multiple queue mode checking */
1959 ret = ixgbe_check_mq_mode(dev);
1961 PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
1966 /* set flag to update link status after init */
1967 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
1970 * Initialize to TRUE. If any Rx queue fails to meet the bulk
1971 * allocation or vector Rx preconditions, the flag will be reset.
1973 adapter->rx_bulk_alloc_allowed = true;
1974 adapter->rx_vec_allowed = true;
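/*
 * Note: these are advisory defaults; the Rx queue setup path is expected
 * to re-check the bulk-allocation and vector-Rx preconditions per queue
 * and clear the corresponding flag for the whole port on the first queue
 * that fails them.
 */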
1980 * Configure device link speed and setup link.
1981 * It returns 0 on success.
1984 ixgbe_dev_start(struct rte_eth_dev *dev)
1986 struct ixgbe_hw *hw =
1987 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1988 struct ixgbe_vf_info *vfinfo =
1989 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
1990 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1991 uint32_t intr_vector = 0;
1992 int err, link_up = 0, negotiate = 0;
1998 PMD_INIT_FUNC_TRACE();
2000 /* IXGBE devices don't support half duplex */
2001 if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
2002 (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
2003 PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
2004 dev->data->dev_conf.link_duplex,
2005 dev->data->port_id);
2009 /* disable uio/vfio intr/eventfd mapping */
2010 rte_intr_disable(intr_handle);
2013 hw->adapter_stopped = 0;
2014 ixgbe_stop_adapter(hw);
2016 /* reinitialize adapter
2017 * this calls reset and start */
2018 status = ixgbe_pf_reset_hw(hw);
2021 hw->mac.ops.start_hw(hw);
2022 hw->mac.get_link_status = true;
2024 /* configure PF module if SRIOV enabled */
2025 ixgbe_pf_host_configure(dev);
2027 /* check and configure queue intr-vector mapping */
2028 if ((rte_intr_cap_multiple(intr_handle) ||
2029 !RTE_ETH_DEV_SRIOV(dev).active) &&
2030 dev->data->dev_conf.intr_conf.rxq != 0) {
2031 intr_vector = dev->data->nb_rx_queues;
2032 if (rte_intr_efd_enable(intr_handle, intr_vector))
2036 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2037 intr_handle->intr_vec =
2038 rte_zmalloc("intr_vec",
2039 dev->data->nb_rx_queues * sizeof(int), 0);
2040 if (intr_handle->intr_vec == NULL) {
2041 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
2042 " intr_vec\n", dev->data->nb_rx_queues);
2047 /* configure MSI-X so cores can sleep until an Rx interrupt arrives */
2048 ixgbe_configure_msix(dev);
2050 /* initialize transmission unit */
2051 ixgbe_dev_tx_init(dev);
2053 /* This can fail when allocating mbufs for descriptor rings */
2054 err = ixgbe_dev_rx_init(dev);
2056 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
2060 err = ixgbe_dev_rxtx_start(dev);
2062 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
2066 /* Skip link setup if loopback mode is enabled for 82599. */
2067 if (hw->mac.type == ixgbe_mac_82599EB &&
2068 dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
2069 goto skip_link_setup;
2071 if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
2072 err = hw->mac.ops.setup_sfp(hw);
2077 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2078 /* Turn on the copper */
2079 ixgbe_set_phy_power(hw, true);
2081 /* Turn on the laser */
2082 ixgbe_enable_tx_laser(hw);
2085 err = ixgbe_check_link(hw, &speed, &link_up, 0);
2088 dev->data->dev_link.link_status = link_up;
2090 err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
2094 switch (dev->data->dev_conf.link_speed) {
2095 case ETH_LINK_SPEED_AUTONEG:
2096 speed = (hw->mac.type != ixgbe_mac_82598EB) ?
2097 IXGBE_LINK_SPEED_82599_AUTONEG :
2098 IXGBE_LINK_SPEED_82598_AUTONEG;
2100 case ETH_LINK_SPEED_100:
2102 * Invalid for 82598 but error will be detected by
2103 * ixgbe_setup_link()
2105 speed = IXGBE_LINK_SPEED_100_FULL;
2107 case ETH_LINK_SPEED_1000:
2108 speed = IXGBE_LINK_SPEED_1GB_FULL;
2110 case ETH_LINK_SPEED_10000:
2111 speed = IXGBE_LINK_SPEED_10GB_FULL;
2114 PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu",
2115 dev->data->dev_conf.link_speed,
2116 dev->data->port_id);
2120 err = ixgbe_setup_link(hw, speed, link_up);
2126 if (rte_intr_allow_others(intr_handle)) {
2127 /* check if lsc interrupt is enabled */
2128 if (dev->data->dev_conf.intr_conf.lsc != 0)
2129 ixgbe_dev_lsc_interrupt_setup(dev);
2131 rte_intr_callback_unregister(intr_handle,
2132 ixgbe_dev_interrupt_handler,
2134 if (dev->data->dev_conf.intr_conf.lsc != 0)
2135 PMD_INIT_LOG(INFO, "lsc won't be enabled because of"
2136 " no intr multiplex\n");
2139 /* check if rxq interrupt is enabled */
2140 if (dev->data->dev_conf.intr_conf.rxq != 0 &&
2141 rte_intr_dp_is_en(intr_handle))
2142 ixgbe_dev_rxq_interrupt_setup(dev);
2144 /* enable uio/vfio intr/eventfd mapping */
2145 rte_intr_enable(intr_handle);
2147 /* resume enabled intr since hw reset */
2148 ixgbe_enable_intr(dev);
2150 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
2151 ETH_VLAN_EXTEND_MASK;
2152 ixgbe_vlan_offload_set(dev, mask);
2154 if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
2155 /* Enable vlan filtering for VMDq */
2156 ixgbe_vmdq_vlan_hw_filter_enable(dev);
2159 /* Configure DCB hw */
2160 ixgbe_configure_dcb(dev);
2162 if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
2163 err = ixgbe_fdir_configure(dev);
2168 /* Restore vf rate limit */
2169 if (vfinfo != NULL) {
2170 for (vf = 0; vf < dev->pci_dev->max_vfs; vf++)
2171 for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
2172 if (vfinfo[vf].tx_rate[idx] != 0)
2173 ixgbe_set_vf_rate_limit(dev, vf,
2174 vfinfo[vf].tx_rate[idx],
2178 ixgbe_restore_statistics_mapping(dev);
2183 PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
2184 ixgbe_dev_clear_queues(dev);
2189 * Stop device: disable rx and tx functions to allow for reconfiguring.
2192 ixgbe_dev_stop(struct rte_eth_dev *dev)
2194 struct rte_eth_link link;
2195 struct ixgbe_hw *hw =
2196 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2197 struct ixgbe_vf_info *vfinfo =
2198 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
2199 struct ixgbe_filter_info *filter_info =
2200 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2201 struct ixgbe_5tuple_filter *p_5tuple, *p_5tuple_next;
2202 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
2205 PMD_INIT_FUNC_TRACE();
2207 /* disable interrupts */
2208 ixgbe_disable_intr(hw);
2210 /* disable intr eventfd mapping */
2211 rte_intr_disable(intr_handle);
2214 ixgbe_pf_reset_hw(hw);
2215 hw->adapter_stopped = 0;
2218 ixgbe_stop_adapter(hw);
2220 for (vf = 0; vfinfo != NULL &&
2221 vf < dev->pci_dev->max_vfs; vf++)
2222 vfinfo[vf].clear_to_send = false;
2224 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2225 /* Turn off the copper */
2226 ixgbe_set_phy_power(hw, false);
2228 /* Turn off the laser */
2229 ixgbe_disable_tx_laser(hw);
2232 ixgbe_dev_clear_queues(dev);
2234 /* Clear stored conf */
2235 dev->data->scattered_rx = 0;
2238 /* Clear recorded link status */
2239 memset(&link, 0, sizeof(link));
2240 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2242 /* Remove all ntuple filters of the device */
2243 for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
2244 p_5tuple != NULL; p_5tuple = p_5tuple_next) {
2245 p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
2246 TAILQ_REMOVE(&filter_info->fivetuple_list,
2250 memset(filter_info->fivetuple_mask, 0,
2251 sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
2253 if (!rte_intr_allow_others(intr_handle))
2254 /* resume to the default handler */
2255 rte_intr_callback_register(intr_handle,
2256 ixgbe_dev_interrupt_handler,
2259 /* Clean datapath event and queue/vec mapping */
2260 rte_intr_efd_disable(intr_handle);
2261 if (intr_handle->intr_vec != NULL) {
2262 rte_free(intr_handle->intr_vec);
2263 intr_handle->intr_vec = NULL;
2268 * Set device link up: enable tx.
2271 ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
2273 struct ixgbe_hw *hw =
2274 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2275 if (hw->mac.type == ixgbe_mac_82599EB) {
2276 #ifdef RTE_NIC_BYPASS
2277 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2278 /* Not supported in bypass mode */
2279 PMD_INIT_LOG(ERR, "Set link up is not supported "
2280 "by device id 0x%x", hw->device_id);
2286 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2287 /* Turn on the copper */
2288 ixgbe_set_phy_power(hw, true);
2290 /* Turn on the laser */
2291 ixgbe_enable_tx_laser(hw);
2298 * Set device link down: disable tx.
2301 ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
2303 struct ixgbe_hw *hw =
2304 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2305 if (hw->mac.type == ixgbe_mac_82599EB) {
2306 #ifdef RTE_NIC_BYPASS
2307 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2308 /* Not supported in bypass mode */
2309 PMD_INIT_LOG(ERR, "Set link down is not supported "
2310 "by device id 0x%x", hw->device_id);
2316 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2317 /* Turn off the copper */
2318 ixgbe_set_phy_power(hw, false);
2320 /* Turn off the laser */
2321 ixgbe_disable_tx_laser(hw);
2328 * Reset and stop the device.
2331 ixgbe_dev_close(struct rte_eth_dev *dev)
2333 struct ixgbe_hw *hw =
2334 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2336 PMD_INIT_FUNC_TRACE();
2338 ixgbe_pf_reset_hw(hw);
2340 ixgbe_dev_stop(dev);
2341 hw->adapter_stopped = 1;
2343 ixgbe_dev_free_queues(dev);
2345 ixgbe_disable_pcie_master(hw);
2347 /* reprogram the RAR[0] in case user changed it. */
2348 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2352 ixgbe_read_stats_registers(struct ixgbe_hw *hw,
2353 struct ixgbe_hw_stats *hw_stats,
2354 uint64_t *total_missed_rx, uint64_t *total_qbrc,
2355 uint64_t *total_qprc, uint64_t *total_qprdc)
2357 uint32_t bprc, lxon, lxoff, total;
2358 uint32_t delta_gprc = 0;
2360 /* Workaround for RX byte count not including CRC bytes when CRC
2361 * strip is enabled. CRC bytes are removed from counters when crc_strip
2364 int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) &
2365 IXGBE_HLREG0_RXCRCSTRP);
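/*
 * When CRC stripping is enabled (HLREG0.RXCRCSTRP set), the hardware byte
 * counters still include the 4 CRC octets of each frame, so the byte
 * totals accumulated below subtract ETHER_CRC_LEN per received or
 * transmitted packet whenever crc_strip is set.
 */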
2367 hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
2368 hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
2369 hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
2370 hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
2372 for (i = 0; i < 8; i++) {
2374 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
2375 /* global total per queue */
2376 hw_stats->mpc[i] += mp;
2377 /* Running comprehensive total for stats display */
2378 *total_missed_rx += hw_stats->mpc[i];
2379 if (hw->mac.type == ixgbe_mac_82598EB) {
2380 hw_stats->rnbc[i] +=
2381 IXGBE_READ_REG(hw, IXGBE_RNBC(i));
2382 hw_stats->pxonrxc[i] +=
2383 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
2384 hw_stats->pxoffrxc[i] +=
2385 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
2387 hw_stats->pxonrxc[i] +=
2388 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
2389 hw_stats->pxoffrxc[i] +=
2390 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
2391 hw_stats->pxon2offc[i] +=
2392 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
2394 hw_stats->pxontxc[i] +=
2395 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
2396 hw_stats->pxofftxc[i] +=
2397 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
2399 for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
2400 uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i));
2401 uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i));
2402 uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
2404 delta_gprc += delta_qprc;
2406 hw_stats->qprc[i] += delta_qprc;
2407 hw_stats->qptc[i] += delta_qptc;
2409 hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
2410 hw_stats->qbrc[i] +=
2411 ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
2413 hw_stats->qbrc[i] -= delta_qprc * ETHER_CRC_LEN;
2415 hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
2416 hw_stats->qbtc[i] +=
2417 ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
2419 hw_stats->qprdc[i] += delta_qprdc;
2420 *total_qprdc += hw_stats->qprdc[i];
2422 *total_qprc += hw_stats->qprc[i];
2423 *total_qbrc += hw_stats->qbrc[i];
2425 hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
2426 hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
2427 hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
2430 * An erratum states that gprc actually counts good + missed packets;
2431 * as a workaround, set gprc to the sum of the per-queue packet receive counts
2433 hw_stats->gprc = *total_qprc;
2435 if (hw->mac.type != ixgbe_mac_82598EB) {
2436 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
2437 hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
2438 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
2439 hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
2440 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
2441 hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
2442 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
2443 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
2445 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
2446 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
2447 /* 82598 only has a counter in the high register */
2448 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
2449 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
2450 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
2452 uint64_t old_tpr = hw_stats->tpr;
2454 hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
2455 hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
2458 hw_stats->gorc -= delta_gprc * ETHER_CRC_LEN;
2460 uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC);
2461 hw_stats->gptc += delta_gptc;
2462 hw_stats->gotc -= delta_gptc * ETHER_CRC_LEN;
2463 hw_stats->tor -= (hw_stats->tpr - old_tpr) * ETHER_CRC_LEN;
2466 * Workaround: mprc hardware is incorrectly counting
2467 * broadcasts, so for now we subtract those.
2469 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
2470 hw_stats->bprc += bprc;
2471 hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
2472 if (hw->mac.type == ixgbe_mac_82598EB)
2473 hw_stats->mprc -= bprc;
2475 hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
2476 hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
2477 hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
2478 hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
2479 hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
2480 hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
2482 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
2483 hw_stats->lxontxc += lxon;
2484 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
2485 hw_stats->lxofftxc += lxoff;
2486 total = lxon + lxoff;
2488 hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
2489 hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
2490 hw_stats->gptc -= total;
2491 hw_stats->mptc -= total;
2492 hw_stats->ptc64 -= total;
2493 hw_stats->gotc -= total * ETHER_MIN_LEN;
2495 hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
2496 hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
2497 hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
2498 hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
2499 hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
2500 hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
2501 hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
2502 hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
2503 hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
2504 hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
2505 hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
2506 hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
2507 hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
2508 hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
2509 hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
2510 hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
2511 /* Only read FCOE on 82599 */
2512 if (hw->mac.type != ixgbe_mac_82598EB) {
2513 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
2514 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
2515 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
2516 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
2517 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
2520 /* Flow Director Stats registers */
2521 hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
2522 hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
2526 * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
2529 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2531 struct ixgbe_hw *hw =
2532 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2533 struct ixgbe_hw_stats *hw_stats =
2534 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2535 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
2538 total_missed_rx = 0;
2543 ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
2544 &total_qprc, &total_qprdc);
2549 /* Fill out the rte_eth_stats statistics structure */
2550 stats->ipackets = total_qprc;
2551 stats->ibytes = total_qbrc;
2552 stats->opackets = hw_stats->gptc;
2553 stats->obytes = hw_stats->gotc;
2555 for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
2556 stats->q_ipackets[i] = hw_stats->qprc[i];
2557 stats->q_opackets[i] = hw_stats->qptc[i];
2558 stats->q_ibytes[i] = hw_stats->qbrc[i];
2559 stats->q_obytes[i] = hw_stats->qbtc[i];
2560 stats->q_errors[i] = hw_stats->qprdc[i];
2564 stats->imissed = total_missed_rx;
2565 stats->ierrors = hw_stats->crcerrs +
2582 ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
2584 struct ixgbe_hw_stats *stats =
2585 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2587 /* HW registers are cleared on read */
2588 ixgbe_dev_stats_get(dev, NULL);
2590 /* Reset software totals */
2591 memset(stats, 0, sizeof(*stats));
2594 /* This function calculates the number of xstats based on the current config */
2596 ixgbe_xstats_calc_num(void) {
2597 return IXGBE_NB_HW_STATS + (IXGBE_NB_RXQ_PRIO_STATS * 8) +
2598 (IXGBE_NB_TXQ_PRIO_STATS * 8);
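/*
 * Note: the per-priority Rx/Tx stats are reported for each of the 8
 * packet-buffer priorities (traffic classes), hence the fixed factor of 8.
 */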
2602 ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
2605 struct ixgbe_hw *hw =
2606 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2607 struct ixgbe_hw_stats *hw_stats =
2608 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2609 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
2610 unsigned i, stat, count = 0;
2612 count = ixgbe_xstats_calc_num();
2617 total_missed_rx = 0;
2622 ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
2623 &total_qprc, &total_qprdc);
2625 /* If this is a reset, xstats is NULL and we have already cleared the
2626 * registers by reading them.
2631 /* Extended stats from ixgbe_hw_stats */
2633 for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
2634 snprintf(xstats[count].name, sizeof(xstats[count].name), "%s",
2635 rte_ixgbe_stats_strings[i].name);
2636 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2637 rte_ixgbe_stats_strings[i].offset);
2641 /* RX Priority Stats */
2642 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
2643 for (i = 0; i < 8; i++) {
2644 snprintf(xstats[count].name, sizeof(xstats[count].name),
2645 "rx_priority%u_%s", i,
2646 rte_ixgbe_rxq_strings[stat].name);
2647 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2648 rte_ixgbe_rxq_strings[stat].offset +
2649 (sizeof(uint64_t) * i));
2654 /* TX Priority Stats */
2655 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
2656 for (i = 0; i < 8; i++) {
2657 snprintf(xstats[count].name, sizeof(xstats[count].name),
2658 "tx_priority%u_%s", i,
2659 rte_ixgbe_txq_strings[stat].name);
2660 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2661 rte_ixgbe_txq_strings[stat].offset +
2662 (sizeof(uint64_t) * i));
2671 ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
2673 struct ixgbe_hw_stats *stats =
2674 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2676 unsigned count = ixgbe_xstats_calc_num();
2678 /* HW registers are cleared on read */
2679 ixgbe_dev_xstats_get(dev, NULL, count);
2681 /* Reset software totals */
2682 memset(stats, 0, sizeof(*stats));
2686 ixgbevf_update_stats(struct rte_eth_dev *dev)
2688 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2689 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
2690 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
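/*
 * The VF statistics registers are not clear-on-read; the UPDATE_VF_STAT
 * and UPDATE_VF_STAT_36BIT macros compute the delta from the last
 * snapshot and accumulate it, handling wraparound of the 32-bit packet
 * counters and 36-bit octet counters respectively.
 */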
2692 /* Good Rx packet, include VF loopback */
2693 UPDATE_VF_STAT(IXGBE_VFGPRC,
2694 hw_stats->last_vfgprc, hw_stats->vfgprc);
2696 /* Good Rx octets, include VF loopback */
2697 UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2698 hw_stats->last_vfgorc, hw_stats->vfgorc);
2700 /* Good Tx packet, include VF loopback */
2701 UPDATE_VF_STAT(IXGBE_VFGPTC,
2702 hw_stats->last_vfgptc, hw_stats->vfgptc);
2704 /* Good Tx octets, include VF loopback */
2705 UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2706 hw_stats->last_vfgotc, hw_stats->vfgotc);
2708 /* Rx Multicast Packets */
2709 UPDATE_VF_STAT(IXGBE_VFMPRC,
2710 hw_stats->last_vfmprc, hw_stats->vfmprc);
2714 ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
2717 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
2718 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2721 if (n < IXGBEVF_NB_XSTATS)
2722 return IXGBEVF_NB_XSTATS;
2724 ixgbevf_update_stats(dev);
2729 /* Extended stats */
2730 for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
2731 snprintf(xstats[i].name, sizeof(xstats[i].name),
2732 "%s", rte_ixgbevf_stats_strings[i].name);
2733 xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
2734 rte_ixgbevf_stats_strings[i].offset);
2737 return IXGBEVF_NB_XSTATS;
2741 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2743 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
2744 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2746 ixgbevf_update_stats(dev);
2751 stats->ipackets = hw_stats->vfgprc;
2752 stats->ibytes = hw_stats->vfgorc;
2753 stats->opackets = hw_stats->vfgptc;
2754 stats->obytes = hw_stats->vfgotc;
2755 stats->imcasts = hw_stats->vfmprc;
2756 /* stats->imcasts should be removed as imcasts is deprecated */
2760 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
2762 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
2763 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2765 /* Sync HW register to the last stats */
2766 ixgbevf_dev_stats_get(dev, NULL);
2768 /* reset HW current stats */
2769 hw_stats->vfgprc = 0;
2770 hw_stats->vfgorc = 0;
2771 hw_stats->vfgptc = 0;
2772 hw_stats->vfgotc = 0;
2773 hw_stats->vfmprc = 0;
2778 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2780 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2782 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
2783 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
2784 dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
2785 dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
2786 dev_info->max_mac_addrs = hw->mac.num_rar_entries;
2787 dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
2788 dev_info->max_vfs = dev->pci_dev->max_vfs;
2789 if (hw->mac.type == ixgbe_mac_82598EB)
2790 dev_info->max_vmdq_pools = ETH_16_POOLS;
2792 dev_info->max_vmdq_pools = ETH_64_POOLS;
2793 dev_info->vmdq_queue_num = dev_info->max_rx_queues;
2794 dev_info->rx_offload_capa =
2795 DEV_RX_OFFLOAD_VLAN_STRIP |
2796 DEV_RX_OFFLOAD_IPV4_CKSUM |
2797 DEV_RX_OFFLOAD_UDP_CKSUM |
2798 DEV_RX_OFFLOAD_TCP_CKSUM;
2801 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
2804 if ((hw->mac.type == ixgbe_mac_82599EB ||
2805 hw->mac.type == ixgbe_mac_X540) &&
2806 !RTE_ETH_DEV_SRIOV(dev).active)
2807 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
2809 dev_info->tx_offload_capa =
2810 DEV_TX_OFFLOAD_VLAN_INSERT |
2811 DEV_TX_OFFLOAD_IPV4_CKSUM |
2812 DEV_TX_OFFLOAD_UDP_CKSUM |
2813 DEV_TX_OFFLOAD_TCP_CKSUM |
2814 DEV_TX_OFFLOAD_SCTP_CKSUM |
2815 DEV_TX_OFFLOAD_TCP_TSO;
2817 dev_info->default_rxconf = (struct rte_eth_rxconf) {
2819 .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
2820 .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
2821 .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
2823 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
2827 dev_info->default_txconf = (struct rte_eth_txconf) {
2829 .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
2830 .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
2831 .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
2833 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
2834 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
2835 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
2836 ETH_TXQ_FLAGS_NOOFFLOADS,
2839 dev_info->rx_desc_lim = rx_desc_lim;
2840 dev_info->tx_desc_lim = tx_desc_lim;
2842 dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
2843 dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
2844 dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
2848 ixgbevf_dev_info_get(struct rte_eth_dev *dev,
2849 struct rte_eth_dev_info *dev_info)
2851 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2853 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
2854 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
2855 dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
2856 dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS reg */
2857 dev_info->max_mac_addrs = hw->mac.num_rar_entries;
2858 dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
2859 dev_info->max_vfs = dev->pci_dev->max_vfs;
2860 if (hw->mac.type == ixgbe_mac_82598EB)
2861 dev_info->max_vmdq_pools = ETH_16_POOLS;
2863 dev_info->max_vmdq_pools = ETH_64_POOLS;
2864 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
2865 DEV_RX_OFFLOAD_IPV4_CKSUM |
2866 DEV_RX_OFFLOAD_UDP_CKSUM |
2867 DEV_RX_OFFLOAD_TCP_CKSUM;
2868 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
2869 DEV_TX_OFFLOAD_IPV4_CKSUM |
2870 DEV_TX_OFFLOAD_UDP_CKSUM |
2871 DEV_TX_OFFLOAD_TCP_CKSUM |
2872 DEV_TX_OFFLOAD_SCTP_CKSUM |
2873 DEV_TX_OFFLOAD_TCP_TSO;
2875 dev_info->default_rxconf = (struct rte_eth_rxconf) {
2877 .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
2878 .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
2879 .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
2881 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
2885 dev_info->default_txconf = (struct rte_eth_txconf) {
2887 .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
2888 .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
2889 .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
2891 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
2892 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
2893 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
2894 ETH_TXQ_FLAGS_NOOFFLOADS,
2897 dev_info->rx_desc_lim = rx_desc_lim;
2898 dev_info->tx_desc_lim = tx_desc_lim;
2901 /* return 0 if link status has changed, -1 otherwise */
2903 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2905 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2906 struct rte_eth_link link, old;
2907 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
2911 link.link_status = 0;
2912 link.link_speed = 0;
2913 link.link_duplex = 0;
2914 memset(&old, 0, sizeof(old));
2915 rte_ixgbe_dev_atomic_read_link_status(dev, &old);
2917 hw->mac.get_link_status = true;
2919 /* don't wait for completion if no wait was requested or the lsc interrupt is enabled */
2920 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
2921 diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
2923 diag = ixgbe_check_link(hw, &link_speed, &link_up, 1);
2926 link.link_speed = ETH_LINK_SPEED_100;
2927 link.link_duplex = ETH_LINK_HALF_DUPLEX;
2928 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2929 if (link.link_status == old.link_status)
2935 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2936 if (link.link_status == old.link_status)
2940 link.link_status = 1;
2941 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2943 switch (link_speed) {
2945 case IXGBE_LINK_SPEED_UNKNOWN:
2946 link.link_duplex = ETH_LINK_HALF_DUPLEX;
2947 link.link_speed = ETH_LINK_SPEED_100;
2950 case IXGBE_LINK_SPEED_100_FULL:
2951 link.link_speed = ETH_LINK_SPEED_100;
2954 case IXGBE_LINK_SPEED_1GB_FULL:
2955 link.link_speed = ETH_LINK_SPEED_1000;
2958 case IXGBE_LINK_SPEED_10GB_FULL:
2959 link.link_speed = ETH_LINK_SPEED_10000;
2962 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2964 if (link.link_status == old.link_status)
2971 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
2973 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2976 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2977 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2978 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2982 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
2984 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2987 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2988 fctrl &= (~IXGBE_FCTRL_UPE);
2989 if (dev->data->all_multicast == 1)
2990 fctrl |= IXGBE_FCTRL_MPE;
2992 fctrl &= (~IXGBE_FCTRL_MPE);
2993 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2997 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
2999 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3002 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3003 fctrl |= IXGBE_FCTRL_MPE;
3004 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3008 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
3010 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3013 if (dev->data->promiscuous == 1)
3014 return; /* must remain in all_multicast mode */
3016 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3017 fctrl &= (~IXGBE_FCTRL_MPE);
3018 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3022 * It clears the interrupt causes and enables the interrupt.
3023 * It is called only once during NIC initialization.
3026 * Pointer to struct rte_eth_dev.
3029 * - On success, zero.
3030 * - On failure, a negative value.
3033 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
3035 struct ixgbe_interrupt *intr =
3036 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3038 ixgbe_dev_link_status_print(dev);
3039 intr->mask |= IXGBE_EICR_LSC;
3045 * It clears the interrupt causes and enables the interrupt.
3046 * It is called only once during NIC initialization.
3049 * Pointer to struct rte_eth_dev.
3052 * - On success, zero.
3053 * - On failure, a negative value.
3056 ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
3058 struct ixgbe_interrupt *intr =
3059 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3061 intr->mask |= IXGBE_EICR_RTX_QUEUE;
3067 * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
3070 * Pointer to struct rte_eth_dev.
3073 * - On success, zero.
3074 * - On failure, a negative value.
3077 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
3080 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3081 struct ixgbe_interrupt *intr =
3082 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3084 /* clear all cause mask */
3085 ixgbe_disable_intr(hw);
3087 /* read the clear-on-read NIC registers here */
3088 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3089 PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
3093 /* set flag for async link update */
3094 if (eicr & IXGBE_EICR_LSC)
3095 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
3097 if (eicr & IXGBE_EICR_MAILBOX)
3098 intr->flags |= IXGBE_FLAG_MAILBOX;
3104 * It gets and then prints the link status.
3107 * Pointer to struct rte_eth_dev.
3110 * - On success, zero.
3111 * - On failure, a negative value.
3114 ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
3116 struct rte_eth_link link;
3118 memset(&link, 0, sizeof(link));
3119 rte_ixgbe_dev_atomic_read_link_status(dev, &link);
3120 if (link.link_status) {
3121 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
3122 (int)(dev->data->port_id),
3123 (unsigned)link.link_speed,
3124 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
3125 "full-duplex" : "half-duplex");
3127 PMD_INIT_LOG(INFO, " Port %d: Link Down",
3128 (int)(dev->data->port_id));
3130 PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
3131 dev->pci_dev->addr.domain,
3132 dev->pci_dev->addr.bus,
3133 dev->pci_dev->addr.devid,
3134 dev->pci_dev->addr.function);
3138 * It executes link_update after an interrupt has occurred.
3141 * Pointer to struct rte_eth_dev.
3144 * - On success, zero.
3145 * - On failure, a negative value.
3148 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
3150 struct ixgbe_interrupt *intr =
3151 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3153 struct rte_eth_link link;
3154 int intr_enable_delay = false;
3156 PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
3158 if (intr->flags & IXGBE_FLAG_MAILBOX) {
3159 ixgbe_pf_mbx_process(dev);
3160 intr->flags &= ~IXGBE_FLAG_MAILBOX;
3163 if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
3164 /* get the link status before the update, to predict the new state later */
3165 memset(&link, 0, sizeof(link));
3166 rte_ixgbe_dev_atomic_read_link_status(dev, &link);
3168 ixgbe_dev_link_update(dev, 0);
3171 if (!link.link_status)
3172 /* handle it 1 second later, waiting for the link to stabilize */
3173 timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
3174 /* the link is likely going down */
3176 /* handle it 4 seconds later, waiting for the link to stabilize */
3177 timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
3179 ixgbe_dev_link_status_print(dev);
3181 intr_enable_delay = true;
3184 if (intr_enable_delay) {
3185 if (rte_eal_alarm_set(timeout * 1000,
3186 ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
3187 PMD_DRV_LOG(ERR, "Error setting alarm");
3189 PMD_DRV_LOG(DEBUG, "enable intr immediately");
3190 ixgbe_enable_intr(dev);
3191 rte_intr_enable(&(dev->pci_dev->intr_handle));
3199 * Interrupt handler to be registered as an alarm callback for delayed
3200 * handling of a specific interrupt, waiting for the NIC state to become
3201 * stable. The ixgbe interrupt state is not stable right after the link
3202 * goes down, so it must wait 4 seconds to read a stable status.
3205 * Pointer to interrupt handle.
3207 * The address of the parameter (struct rte_eth_dev *) registered before.
3213 ixgbe_dev_interrupt_delayed_handler(void *param)
3215 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3216 struct ixgbe_interrupt *intr =
3217 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3218 struct ixgbe_hw *hw =
3219 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3222 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3223 if (eicr & IXGBE_EICR_MAILBOX)
3224 ixgbe_pf_mbx_process(dev);
3226 if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
3227 ixgbe_dev_link_update(dev, 0);
3228 intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
3229 ixgbe_dev_link_status_print(dev);
3230 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
3233 PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
3234 ixgbe_enable_intr(dev);
3235 rte_intr_enable(&(dev->pci_dev->intr_handle));
3239 * Interrupt handler triggered by the NIC for handling a
3240 * specific interrupt.
3243 * Pointer to interrupt handle.
3245 * The address of the parameter (struct rte_eth_dev *) registered before.
3251 ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
3254 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3256 ixgbe_dev_interrupt_get_status(dev);
3257 ixgbe_dev_interrupt_action(dev);
3261 ixgbe_dev_led_on(struct rte_eth_dev *dev)
3263 struct ixgbe_hw *hw;
3265 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3266 return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
3270 ixgbe_dev_led_off(struct rte_eth_dev *dev)
3272 struct ixgbe_hw *hw;
3274 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3275 return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
3279 ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3281 struct ixgbe_hw *hw;
3287 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3289 fc_conf->pause_time = hw->fc.pause_time;
3290 fc_conf->high_water = hw->fc.high_water[0];
3291 fc_conf->low_water = hw->fc.low_water[0];
3292 fc_conf->send_xon = hw->fc.send_xon;
3293 fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
3296 * Return rx_pause status according to the actual setting of the MFLCN register.
3299 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
3300 if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
3306 * Return tx_pause status according to the actual setting of the FCCFG register.
3309 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
3310 if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY))
3315 if (rx_pause && tx_pause)
3316 fc_conf->mode = RTE_FC_FULL;
3318 fc_conf->mode = RTE_FC_RX_PAUSE;
3320 fc_conf->mode = RTE_FC_TX_PAUSE;
3322 fc_conf->mode = RTE_FC_NONE;
3328 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3330 struct ixgbe_hw *hw;
3332 uint32_t rx_buf_size;
3333 uint32_t max_high_water;
3335 enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
3342 PMD_INIT_FUNC_TRACE();
3344 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3345 rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
3346 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3349 * Reserve at least one Ethernet frame for the watermark;
3350 * high_water/low_water are in kilobytes for ixgbe
3352 max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
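/*
 * Note: the raw RXPBSIZE value is effectively the packet buffer size in
 * bytes (the SIZE field is expressed in KB and stored starting at bit 10),
 * so after reserving one maximum-sized frame, shifting right by
 * IXGBE_RXPBSIZE_SHIFT (10) yields the 1 KB units used by high/low water.
 */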
3353 if ((fc_conf->high_water > max_high_water) ||
3354 (fc_conf->high_water < fc_conf->low_water)) {
3355 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3356 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
3360 hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
3361 hw->fc.pause_time = fc_conf->pause_time;
3362 hw->fc.high_water[0] = fc_conf->high_water;
3363 hw->fc.low_water[0] = fc_conf->low_water;
3364 hw->fc.send_xon = fc_conf->send_xon;
3365 hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
3367 err = ixgbe_fc_enable(hw);
3369 /* Not negotiated is not an error case */
3370 if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
3372 /* check if we want to forward MAC frames - driver doesn't have native
3373 * capability to do that, so we'll write the registers ourselves */
3375 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
3377 /* set or clear MFLCN.PMCF bit depending on configuration */
3378 if (fc_conf->mac_ctrl_frame_fwd != 0)
3379 mflcn |= IXGBE_MFLCN_PMCF;
3381 mflcn &= ~IXGBE_MFLCN_PMCF;
3383 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn);
3384 IXGBE_WRITE_FLUSH(hw);
3389 PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err);
3394 * ixgbe_pfc_enable_generic - Enable flow control
3395 * @hw: pointer to hardware structure
3396 * @tc_num: traffic class number
3397 * Enable flow control according to the current settings.
3400 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
3403 uint32_t mflcn_reg, fccfg_reg;
3405 uint32_t fcrtl, fcrth;
3409 /* Validate the water mark configuration */
3410 if (!hw->fc.pause_time) {
3411 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
3415 /* Low water mark of zero causes XOFF floods */
3416 if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
3417 /* High/Low water cannot be 0 */
3418 if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) {
3419 PMD_INIT_LOG(ERR, "Invalid water mark configuration");
3420 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
3424 if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
3425 PMD_INIT_LOG(ERR, "Invalid water mark configuration");
3426 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
3430 /* Negotiate the fc mode to use */
3431 ixgbe_fc_autoneg(hw);
3433 /* Disable any previous flow control settings */
3434 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
3435 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE);
3437 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
3438 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
3440 switch (hw->fc.current_mode) {
3443 * If more than one RX priority flow control is enabled,
3444 * TX pause cannot be disabled.
3447 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
3448 reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
3449 if (reg & IXGBE_FCRTH_FCEN)
3453 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
3455 case ixgbe_fc_rx_pause:
3457 * Rx Flow control is enabled and Tx Flow control is
3458 * disabled by software override. Since there really
3459 * isn't a way to advertise that we are capable of RX
3460 * Pause ONLY, we will advertise that we support both
3461 * symmetric and asymmetric Rx PAUSE. Later, we will
3462 * disable the adapter's ability to send PAUSE frames.
3464 mflcn_reg |= IXGBE_MFLCN_RPFCE;
3466 * If more than one RX priority flow control is enabled,
3467 * TX pause cannot be disabled.
3470 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
3471 reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
3472 if (reg & IXGBE_FCRTH_FCEN)
3476 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
3478 case ixgbe_fc_tx_pause:
3480 * Tx Flow control is enabled, and Rx Flow control is
3481 * disabled by software override.
3483 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
3486 /* Flow control (both Rx and Tx) is enabled by SW override. */
3487 mflcn_reg |= IXGBE_MFLCN_RPFCE;
3488 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
3491 PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
3492 ret_val = IXGBE_ERR_CONFIG;
3497 /* Set 802.3x based flow control settings. */
3498 mflcn_reg |= IXGBE_MFLCN_DPF;
3499 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
3500 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
3502 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
3503 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
3504 hw->fc.high_water[tc_num]) {
3505 fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE;
3506 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl);
3507 fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN;
3509 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0);
3511 * In order to prevent Tx hangs when the internal Tx
3512 * switch is enabled we must set the high water mark
3513 * to the maximum FCRTH value. This allows the Tx
3514 * switch to function even under heavy Rx workloads.
3516 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32;
3518 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth);
3520 /* Configure pause time (2 TCs per register) */
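/* Multiplying the 16-bit pause time by 0x00010001 replicates it into both
 * the high and low halves of each FCTTV register, covering two TCs at once.
 */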
3521 reg = hw->fc.pause_time * 0x00010001;
3522 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
3523 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
3525 /* Configure flow control refresh threshold value */
3526 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
3533 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num)
3535 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3536 int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
3538 if (hw->mac.type != ixgbe_mac_82598EB) {
3539 ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
3545 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
3548 uint32_t rx_buf_size;
3549 uint32_t max_high_water;
3551 uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
3552 struct ixgbe_hw *hw =
3553 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3554 struct ixgbe_dcb_config *dcb_config =
3555 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
3557 enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
3564 PMD_INIT_FUNC_TRACE();
3566 ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
3567 tc_num = map[pfc_conf->priority];
3568 rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
3569 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3571 * Reserve at least one Ethernet frame for the watermark;
3572 * high_water/low_water are in kilobytes for ixgbe
3574 max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
3575 if ((pfc_conf->fc.high_water > max_high_water) ||
3576 (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
3577 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3578 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
3582 hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
3583 hw->fc.pause_time = pfc_conf->fc.pause_time;
3584 hw->fc.send_xon = pfc_conf->fc.send_xon;
3585 hw->fc.low_water[tc_num] = pfc_conf->fc.low_water;
3586 hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
3588 err = ixgbe_dcb_pfc_enable(dev, tc_num);
3590 /* Not negotiated is not an error case */
3591 if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
3594 PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
3599 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
3600 struct rte_eth_rss_reta_entry64 *reta_conf,
3605 uint16_t idx, shift;
3606 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3607 uint16_t sp_reta_size;
3610 PMD_INIT_FUNC_TRACE();
3612 if (!ixgbe_rss_update_sp(hw->mac.type)) {
3613 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
3618 sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
3619 if (reta_size != sp_reta_size) {
3620 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3621 "(%d) doesn't match the number hardware can supported "
3622 "(%d)\n", reta_size, sp_reta_size);
3626 for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
3627 idx = i / RTE_RETA_GROUP_SIZE;
3628 shift = i % RTE_RETA_GROUP_SIZE;
3629 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
3633 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
3634 if (mask == IXGBE_4_BIT_MASK)
3637 r = IXGBE_READ_REG(hw, reta_reg);
3638 for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
3639 if (mask & (0x1 << j))
3640 reta |= reta_conf[idx].reta[shift + j] <<
3643 reta |= r & (IXGBE_8_BIT_MASK <<
3646 IXGBE_WRITE_REG(hw, reta_reg, reta);
3653 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
3654 struct rte_eth_rss_reta_entry64 *reta_conf,
3659 uint16_t idx, shift;
3660 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3661 uint16_t sp_reta_size;
3664 PMD_INIT_FUNC_TRACE();
3665 sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
3666 if (reta_size != sp_reta_size) {
3667 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3668 "(%d) doesn't match the number hardware can supported "
3669 "(%d)\n", reta_size, sp_reta_size);
3673 for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
3674 idx = i / RTE_RETA_GROUP_SIZE;
3675 shift = i % RTE_RETA_GROUP_SIZE;
3676 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
3681 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
3682 reta = IXGBE_READ_REG(hw, reta_reg);
3683 for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
3684 if (mask & (0x1 << j))
3685 reta_conf[idx].reta[shift + j] =
3686 ((reta >> (CHAR_BIT * j)) &
3695 ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
3696 uint32_t index, uint32_t pool)
3698 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3699 uint32_t enable_addr = 1;
3701 ixgbe_set_rar(hw, index, mac_addr->addr_bytes, pool, enable_addr);
3705 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
3707 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3709 ixgbe_clear_rar(hw, index);
3713 ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
3715 ixgbe_remove_rar(dev, 0);
3717 ixgbe_add_rar(dev, addr, 0, 0);
3721 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3725 struct ixgbe_hw *hw;
3726 struct rte_eth_dev_info dev_info;
3727 uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3729 ixgbe_dev_info_get(dev, &dev_info);
3731 /* check that mtu is within the allowed range */
3732 if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
3735 /* refuse an mtu that requires scattered-packet support when the
3736 * feature has not been enabled before. */
3737 if (!dev->data->scattered_rx &&
3738 (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
3739 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
3742 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3743 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3745 /* switch to jumbo mode if needed */
3746 if (frame_size > ETHER_MAX_LEN) {
3747 dev->data->dev_conf.rxmode.jumbo_frame = 1;
3748 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
3750 dev->data->dev_conf.rxmode.jumbo_frame = 0;
3751 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
3753 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3755 /* update max frame size */
3756 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
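/* MAXFRS holds the maximum frame size in its upper 16 bits; clear and
 * rewrite only that field, preserving the lower half of the register.
 */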
3758 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
3759 maxfrs &= 0x0000FFFF;
3760 maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
3761 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
3767 * Virtual Function operations
3770 ixgbevf_intr_disable(struct ixgbe_hw *hw)
3772 PMD_INIT_FUNC_TRACE();
3774 /* Clear the interrupt mask to stop interrupts from being generated */
3775 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
3777 IXGBE_WRITE_FLUSH(hw);
3781 ixgbevf_intr_enable(struct ixgbe_hw *hw)
3783 PMD_INIT_FUNC_TRACE();
3785 /* VF enable interrupt autoclean */
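/* VTEIAM enables interrupt auto-mask, VTEIAC enables auto-clear, and
 * VTEIMS unmasks the VF interrupt causes.
 */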
3786 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK);
3787 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK);
3788 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);
3790 IXGBE_WRITE_FLUSH(hw);
3794 ixgbevf_dev_configure(struct rte_eth_dev *dev)
3796 struct rte_eth_conf *conf = &dev->data->dev_conf;
3797 struct ixgbe_adapter *adapter =
3798 (struct ixgbe_adapter *)dev->data->dev_private;
3800 PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
3801 dev->data->port_id);
3804 * The VF has no ability to enable/disable HW CRC stripping;
3805 * keep the behavior consistent with the host PF
3807 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
3808 if (!conf->rxmode.hw_strip_crc) {
3809 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
3810 conf->rxmode.hw_strip_crc = 1;
3813 if (conf->rxmode.hw_strip_crc) {
3814 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
3815 conf->rxmode.hw_strip_crc = 0;
3820 * Initialize to TRUE. If any Rx queue fails to meet the bulk
3821 * allocation or vector Rx preconditions, the flag will be reset.
3823 adapter->rx_bulk_alloc_allowed = true;
3824 adapter->rx_vec_allowed = true;
3830 ixgbevf_dev_start(struct rte_eth_dev *dev)
3832 struct ixgbe_hw *hw =
3833 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3834 uint32_t intr_vector = 0;
3835 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
3839 PMD_INIT_FUNC_TRACE();
3841 hw->mac.ops.reset_hw(hw);
3842 hw->mac.get_link_status = true;
3844 /* negotiate mailbox API version to use with the PF. */
3845 ixgbevf_negotiate_api(hw);
3847 ixgbevf_dev_tx_init(dev);
3849 /* This can fail when allocating mbufs for descriptor rings */
3850 err = ixgbevf_dev_rx_init(dev);
3852 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
3853 ixgbe_dev_clear_queues(dev);
3858 ixgbevf_set_vfta_all(dev, 1);
3861 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
3862 ETH_VLAN_EXTEND_MASK;
3863 ixgbevf_vlan_offload_set(dev, mask);
3865 ixgbevf_dev_rxtx_start(dev);
3867 /* check and configure queue intr-vector mapping */
3868 if (dev->data->dev_conf.intr_conf.rxq != 0) {
3869 intr_vector = dev->data->nb_rx_queues;
3870 if (rte_intr_efd_enable(intr_handle, intr_vector))
3874 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
3875 intr_handle->intr_vec =
3876 rte_zmalloc("intr_vec",
3877 dev->data->nb_rx_queues * sizeof(int), 0);
3878 if (intr_handle->intr_vec == NULL) {
3879 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
3880 " intr_vec\n", dev->data->nb_rx_queues);
3884 ixgbevf_configure_msix(dev);
3886 rte_intr_enable(intr_handle);
3888 /* Re-enable interrupt for VF */
3889 ixgbevf_intr_enable(hw);
3895 ixgbevf_dev_stop(struct rte_eth_dev *dev)
3897 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3898 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
3900 PMD_INIT_FUNC_TRACE();
3902 hw->adapter_stopped = 1;
3903 ixgbe_stop_adapter(hw);
3906 * Clear what we set; keep shadow_vfta so the configuration can be
3907 * restored when the device starts again
3909 ixgbevf_set_vfta_all(dev, 0);
3911 /* Clear stored conf */
3912 dev->data->scattered_rx = 0;
3914 ixgbe_dev_clear_queues(dev);
3916 /* disable intr eventfd mapping */
3917 rte_intr_disable(intr_handle);
3919 /* Clean datapath event and queue/vec mapping */
3920 rte_intr_efd_disable(intr_handle);
3921 if (intr_handle->intr_vec != NULL) {
3922 rte_free(intr_handle->intr_vec);
3923 intr_handle->intr_vec = NULL;
3928 ixgbevf_dev_close(struct rte_eth_dev *dev)
3930 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3932 PMD_INIT_FUNC_TRACE();
3936 ixgbevf_dev_stop(dev);
3938 ixgbe_dev_free_queues(dev);
3940 /* reprogram the RAR[0] in case the user changed it. */
3941 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
3944 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
3946 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3947 struct ixgbe_vfta *shadow_vfta =
3948 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
3949 int i = 0, j = 0, vfta = 0, mask = 1;
3951 for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
3952 vfta = shadow_vfta->vfta[i];
3955 for (j = 0; j < 32; j++) {
3957 ixgbe_set_vfta(hw, (i<<5)+j, 0, on);
3966 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3968 struct ixgbe_hw *hw =
3969 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3971 struct ixgbe_vfta *shadow_vfta =
3971 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
3972 uint32_t vid_idx = 0;
3973 uint32_t vid_bit = 0;
3976 PMD_INIT_FUNC_TRACE();
3978 /* vind is not used in the VF driver; set it to 0 (see ixgbe_set_vfta_vf) */
3979 ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on);
3981 PMD_INIT_LOG(ERR, "Unable to set VF vlan");
3984 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
3985 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
3987 /* Save what we set and restore it after device reset */
3989 shadow_vfta->vfta[vid_idx] |= vid_bit;
3991 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
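/*
 * Worked example of the shadow VFTA indexing above: for vlan_id 1234,
 * vid_idx = (1234 >> 5) & 0x7F = 38 and vid_bit = 1 << (1234 & 0x1F) =
 * 1 << 18, so bit 18 of shadow_vfta->vfta[38] tracks that VLAN.
 */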
3997 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
3999 struct ixgbe_hw *hw =
4000 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4003 PMD_INIT_FUNC_TRACE();
4005 if (queue >= hw->mac.max_rx_queues)
4008 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
4010 ctrl |= IXGBE_RXDCTL_VME;
4012 ctrl &= ~IXGBE_RXDCTL_VME;
4013 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
4015 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
4019 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
4021 struct ixgbe_hw *hw =
4022 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4026 /* The VF only supports the HW VLAN strip offload; others are not supported */
4027 if (mask & ETH_VLAN_STRIP_MASK) {
4028 on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
4030 for (i = 0; i < hw->mac.max_rx_queues; i++)
4031 ixgbevf_vlan_strip_queue_set(dev, i, on);
4036 ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
4040 /* we only need to do this if VMDq is enabled */
4041 reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
4042 if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
4043 PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting");
4051 ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr)
4053 uint32_t vector = 0;
4054 switch (hw->mac.mc_filter_type) {
4055 case 0: /* use bits [47:36] of the address */
4056 vector = ((uc_addr->addr_bytes[4] >> 4) |
4057 (((uint16_t)uc_addr->addr_bytes[5]) << 4));
4059 case 1: /* use bits [46:35] of the address */
4060 vector = ((uc_addr->addr_bytes[4] >> 3) |
4061 (((uint16_t)uc_addr->addr_bytes[5]) << 5));
4063 case 2: /* use bits [45:34] of the address */
4064 vector = ((uc_addr->addr_bytes[4] >> 2) |
4065 (((uint16_t)uc_addr->addr_bytes[5]) << 6));
4067 case 3: /* use bits [43:32] of the address */
4068 vector = ((uc_addr->addr_bytes[4]) |
4069 (((uint16_t)uc_addr->addr_bytes[5]) << 8));
4071 default: /* Invalid mc_filter_type */
4075 /* the vector can only be 12 bits wide, or the table boundary is exceeded */
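/*
 * Worked example for mc_filter_type 0 above: with addr_bytes[4] = 0xAB
 * and addr_bytes[5] = 0xCD, vector = (0xAB >> 4) | (0xCD << 4) = 0x0A |
 * 0xCD0 = 0xCDA, which fits in the 12 bits allowed.
 */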
4081 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
4089 const uint32_t ixgbe_uta_idx_mask = 0x7F;
4090 const uint32_t ixgbe_uta_bit_shift = 5;
4091 const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
4092 const uint32_t bit1 = 0x1;
4094 struct ixgbe_hw *hw =
4095 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4096 struct ixgbe_uta_info *uta_info =
4097 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
4099 /* The UTA table only exists on 82599 hardware and newer */
4100 if (hw->mac.type < ixgbe_mac_82599EB)
4103 vector = ixgbe_uta_vector(hw, mac_addr);
4104 uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
4105 uta_shift = vector & ixgbe_uta_bit_mask;
4107 rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
4111 reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
4113 uta_info->uta_in_use++;
4114 reg_val |= (bit1 << uta_shift);
4115 uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
4117 uta_info->uta_in_use--;
4118 reg_val &= ~(bit1 << uta_shift);
4119 uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
4122 IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);
4124 if (uta_info->uta_in_use > 0)
4125 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
4126 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
4128 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
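/*
 * Usage sketch (hypothetical application code): this function is
 * reached through the generic ethdev API, e.g.
 *
 *	struct ether_addr addr = {{0x00, 0x1B, 0x21, 0x00, 0x00, 0x01}};
 *	rte_eth_dev_uc_hash_table_set(port_id, &addr, 1);
 *
 * which hashes the address into the 4096-bit UTA and enables MFE.
 */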
4134 ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
4137 struct ixgbe_hw *hw =
4138 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4139 struct ixgbe_uta_info *uta_info =
4140 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
4142 /* The UTA table only exists on 82599 hardware and newer */
4143 if (hw->mac.type < ixgbe_mac_82599EB)
4147 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
4148 uta_info->uta_shadow[i] = ~0;
4149 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
4152 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
4153 uta_info->uta_shadow[i] = 0;
4154 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
4162 ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
4164 uint32_t new_val = orig_val;
4166 if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
4167 new_val |= IXGBE_VMOLR_AUPE;
4168 if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
4169 new_val |= IXGBE_VMOLR_ROMPE;
4170 if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
4171 new_val |= IXGBE_VMOLR_ROPE;
4172 if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
4173 new_val |= IXGBE_VMOLR_BAM;
4174 if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
4175 new_val |= IXGBE_VMOLR_MPE;
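/*
 * Example of the conversion above: rx_mask = ETH_VMDQ_ACCEPT_UNTAG |
 * ETH_VMDQ_ACCEPT_BROADCAST yields orig_val | IXGBE_VMOLR_AUPE |
 * IXGBE_VMOLR_BAM, leaving all other VMOLR bits untouched.
 */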
4181 ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
4182 uint16_t rx_mask, uint8_t on)
4186 struct ixgbe_hw *hw =
4187 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4188 uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
4190 if (hw->mac.type == ixgbe_mac_82598EB) {
4191 PMD_INIT_LOG(ERR, "setting the VF receive mode is only"
4192 " supported on 82599 hardware and newer");
4195 if (ixgbe_vmdq_mode_check(hw) < 0)
4198 val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
4205 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
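/*
 * Usage sketch (hypothetical application code): pool Rx modes are set
 * through the generic API, e.g. accept broadcast frames on pool 3:
 *
 *	rte_eth_dev_set_vf_rxmode(port_id, 3, ETH_VMDQ_ACCEPT_BROADCAST, 1);
 */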
4211 ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
4215 const uint8_t bit1 = 0x1;
4217 struct ixgbe_hw *hw =
4218 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4220 if (ixgbe_vmdq_mode_check(hw) < 0)
4223 addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2);
4224 reg = IXGBE_READ_REG(hw, addr);
4232 IXGBE_WRITE_REG(hw, addr, reg);
4238 ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
4242 const uint8_t bit1 = 0x1;
4244 struct ixgbe_hw *hw =
4245 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4247 if (ixgbe_vmdq_mode_check(hw) < 0)
4250 addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2);
4251 reg = IXGBE_READ_REG(hw, addr);
4259 IXGBE_WRITE_REG(hw, addr, reg);
4265 ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
4266 uint64_t pool_mask, uint8_t vlan_on)
4270 struct ixgbe_hw *hw =
4271 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4273 if (ixgbe_vmdq_mode_check(hw) < 0)
4275 for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
4276 if (pool_mask & ((uint64_t)(1ULL << pool_idx)))
4277 ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx, vlan_on);
4285 #define IXGBE_MRCTL_VPME 0x01 /* Virtual Pool Mirroring. */
4286 #define IXGBE_MRCTL_UPME 0x02 /* Uplink Port Mirroring. */
4287 #define IXGBE_MRCTL_DPME 0x04 /* Downlink Port Mirroring. */
4288 #define IXGBE_MRCTL_VLME 0x08 /* VLAN Mirroring. */
4289 #define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
4290 ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
4291 ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
4294 ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
4295 struct rte_eth_mirror_conf *mirror_conf,
4296 uint8_t rule_id, uint8_t on)
4298 uint32_t mr_ctl, vlvf;
4299 uint32_t mp_lsb = 0;
4300 uint32_t mv_msb = 0;
4301 uint32_t mv_lsb = 0;
4302 uint32_t mp_msb = 0;
4305 uint64_t vlan_mask = 0;
4307 const uint8_t pool_mask_offset = 32;
4308 const uint8_t vlan_mask_offset = 32;
4309 const uint8_t dst_pool_offset = 8;
4310 const uint8_t rule_mr_offset = 4;
4311 const uint8_t mirror_rule_mask = 0x0F;
4313 struct ixgbe_mirror_info *mr_info =
4314 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
4315 struct ixgbe_hw *hw =
4316 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4317 uint8_t mirror_type = 0;
4319 if (ixgbe_vmdq_mode_check(hw) < 0)
4322 if (rule_id >= IXGBE_MAX_MIRROR_RULES)
4325 if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
4326 PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
4327 mirror_conf->rule_type);
4331 if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
4332 mirror_type |= IXGBE_MRCTL_VLME;
4333 /* Check if the vlan id is valid and find the corresponding VLAN ID index in VLVF */
4334 for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
4335 if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
4336 /* search for the pool VLAN filter index related to this vlan id */
4337 reg_index = ixgbe_find_vlvf_slot(hw,
4338 mirror_conf->vlan.vlan_id[i]);
4341 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
4342 if ((vlvf & IXGBE_VLVF_VIEN) &&
4343 ((vlvf & IXGBE_VLVF_VLANID_MASK) ==
4344 mirror_conf->vlan.vlan_id[i]))
4345 vlan_mask |= (1ULL << reg_index);
4352 mv_lsb = vlan_mask & 0xFFFFFFFF;
4353 mv_msb = vlan_mask >> vlan_mask_offset;
4355 mr_info->mr_conf[rule_id].vlan.vlan_mask =
4356 mirror_conf->vlan.vlan_mask;
4357 for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
4358 if (mirror_conf->vlan.vlan_mask & (1ULL << i))
4359 mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
4360 mirror_conf->vlan.vlan_id[i];
4365 mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
4366 for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
4367 mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
4372 * if pool mirroring is enabled, write the related pool mask registers;
4373 * if it is disabled, clear the PFMRVM registers
4375 if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
4376 mirror_type |= IXGBE_MRCTL_VPME;
4378 mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
4379 mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
4380 mr_info->mr_conf[rule_id].pool_mask =
4381 mirror_conf->pool_mask;
4386 mr_info->mr_conf[rule_id].pool_mask = 0;
4389 if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
4390 mirror_type |= IXGBE_MRCTL_UPME;
4391 if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
4392 mirror_type |= IXGBE_MRCTL_DPME;
4394 /* read mirror control register and recalculate it */
4395 mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
4398 mr_ctl |= mirror_type;
4399 mr_ctl &= mirror_rule_mask;
4400 mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
4402 mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
4404 mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
4405 mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
4407 /* write mirror control register */
4408 IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
4410 /* write pool mirror control register */
4411 if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) {
4412 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
4413 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
4416 /* write VLAN mirror control register */
4417 if (mirror_conf->rule_type == ETH_MIRROR_VLAN) {
4418 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
4419 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
4427 ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
4430 uint32_t lsb_val = 0;
4431 uint32_t msb_val = 0;
4432 const uint8_t rule_mr_offset = 4;
4434 struct ixgbe_hw *hw =
4435 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4436 struct ixgbe_mirror_info *mr_info =
4437 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
4439 if (ixgbe_vmdq_mode_check(hw) < 0)
4442 memset(&mr_info->mr_conf[rule_id], 0,
4443 sizeof(struct rte_eth_mirror_conf));
4445 /* clear PFVMCTL register */
4446 IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
4448 /* clear pool mask register */
4449 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
4450 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
4452 /* clear vlan mask register */
4453 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
4454 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);
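/*
 * Usage sketch (hypothetical application code): mirror all traffic of
 * pools 0 and 1 into pool 4 with rule 0, then tear the rule down:
 *
 *	struct rte_eth_mirror_conf mc;
 *	memset(&mc, 0, sizeof(mc));
 *	mc.rule_type = ETH_MIRROR_VIRTUAL_POOL_UP;
 *	mc.pool_mask = 0x3;
 *	mc.dst_pool = 4;
 *	rte_eth_mirror_rule_set(port_id, &mc, 0, 1);
 *	...
 *	rte_eth_mirror_rule_reset(port_id, 0);
 */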
4460 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
4463 struct ixgbe_hw *hw =
4464 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4466 mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
4467 mask |= (1 << IXGBE_MISC_VEC_ID);
4468 RTE_SET_USED(queue_id);
4469 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
4471 rte_intr_enable(&dev->pci_dev->intr_handle);
4477 ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
4480 struct ixgbe_hw *hw =
4481 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4483 mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
4484 mask &= ~(1 << IXGBE_MISC_VEC_ID);
4485 RTE_SET_USED(queue_id);
4486 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
4492 ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
4495 struct ixgbe_hw *hw =
4496 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4497 struct ixgbe_interrupt *intr =
4498 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4500 if (queue_id < 16) {
4501 ixgbe_disable_intr(hw);
4502 intr->mask |= (1 << queue_id);
4503 ixgbe_enable_intr(dev);
4504 } else if (queue_id < 32) {
4505 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
4506 mask &= (1 << queue_id);
4507 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
4508 } else if (queue_id < 64) {
4509 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
4510 mask &= (1 << (queue_id - 32));
4511 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
4513 rte_intr_enable(&dev->pci_dev->intr_handle);
4519 ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
4522 struct ixgbe_hw *hw =
4523 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4524 struct ixgbe_interrupt *intr =
4525 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4527 if (queue_id < 16) {
4528 ixgbe_disable_intr(hw);
4529 intr->mask &= ~(1 << queue_id);
4530 ixgbe_enable_intr(dev);
4531 } else if (queue_id < 32) {
4532 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
4533 mask &= ~(1 << queue_id);
4534 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
4535 } else if (queue_id < 64) {
4536 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
4537 mask &= ~(1 << (queue_id - 32));
4538 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
4545 ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
4546 uint8_t queue, uint8_t msix_vector)
4550 if (direction == -1) {
4552 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4553 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
4556 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp);
4558 /* rx or tx cause */
4559 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4560 idx = ((16 * (queue & 1)) + (8 * direction));
4561 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
4562 tmp &= ~(0xFF << idx);
4563 tmp |= (msix_vector << idx);
4564 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp);
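/*
 * Worked example of the VTIVAR layout above: for an Rx cause
 * (direction 0) on queue 3, idx = 16 * (3 & 1) + 8 * 0 = 16, so the
 * vector lands in bits [23:16] of VTIVAR(1); two queues share each
 * register.
 */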
4569 * set the IVAR registers, mapping interrupt causes to vectors
4571 * pointer to ixgbe_hw struct
4573 * 0 for Rx, 1 for Tx, -1 for other causes
4575 * queue to map the corresponding interrupt to
4577 * the vector to map to the corresponding queue
4580 ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
4581 uint8_t queue, uint8_t msix_vector)
4585 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4586 if (hw->mac.type == ixgbe_mac_82598EB) {
4587 if (direction == -1)
4589 idx = (((direction * 64) + queue) >> 2) & 0x1F;
4590 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx));
4591 tmp &= ~(0xFF << (8 * (queue & 0x3)));
4592 tmp |= (msix_vector << (8 * (queue & 0x3)));
4593 IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
4594 } else if ((hw->mac.type == ixgbe_mac_82599EB) ||
4595 (hw->mac.type == ixgbe_mac_X540)) {
4596 if (direction == -1) {
4598 idx = ((queue & 1) * 8);
4599 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4600 tmp &= ~(0xFF << idx);
4601 tmp |= (msix_vector << idx);
4602 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp);
4604 /* rx or tx causes */
4605 idx = ((16 * (queue & 1)) + (8 * direction));
4606 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
4607 tmp &= ~(0xFF << idx);
4608 tmp |= (msix_vector << idx);
4609 IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp);
4615 ixgbevf_configure_msix(struct rte_eth_dev *dev)
4617 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
4618 struct ixgbe_hw *hw =
4619 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4621 uint32_t vector_idx = IXGBE_MISC_VEC_ID;
4623 /* don't configure the MSI-X registers if no mapping has been done
4624 * between intr vectors and event fds.
4626 if (!rte_intr_dp_is_en(intr_handle))
4629 /* Configure all RX queues of VF */
4630 for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
4631 /* Force all queues to use vector 0,
4632 * since IXGBE_VF_MAXMSIVECOTR = 1
4634 ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
4635 intr_handle->intr_vec[q_idx] = vector_idx;
4638 /* Configure VF other cause ivar */
4639 ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
4643 * Sets up the hardware to properly generate MSI-X interrupts
4645 * board private structure
4648 ixgbe_configure_msix(struct rte_eth_dev *dev)
4650 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
4651 struct ixgbe_hw *hw =
4652 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4653 uint32_t queue_id, base = IXGBE_MISC_VEC_ID;
4654 uint32_t vec = IXGBE_MISC_VEC_ID;
4658 /* don't configure the MSI-X registers if no mapping has been done
4659 * between intr vectors and event fds
4661 if (!rte_intr_dp_is_en(intr_handle))
4664 if (rte_intr_allow_others(intr_handle))
4665 vec = base = IXGBE_RX_VEC_START;
4667 /* setup GPIE for MSI-X mode */
4668 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4669 gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
4670 IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;
4671 /* auto-clear and auto-set the corresponding bits in EIMS
4672 * when an MSI-X interrupt is triggered
4674 if (hw->mac.type == ixgbe_mac_82598EB) {
4675 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4677 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
4678 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
4680 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4682 /* Populate the IVAR table and set the ITR values to the
4683 * corresponding register.
4685 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
4687 /* by default, 1:1 mapping */
4688 ixgbe_set_ivar_map(hw, 0, queue_id, vec);
4689 intr_handle->intr_vec[queue_id] = vec;
4690 if (vec < base + intr_handle->nb_efd - 1)
4694 switch (hw->mac.type) {
4695 case ixgbe_mac_82598EB:
4696 ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
4699 case ixgbe_mac_82599EB:
4700 case ixgbe_mac_X540:
4701 ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
4706 IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
4707 IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF);
4709 /* set up to autoclear timer, and the vectors */
4710 mask = IXGBE_EIMS_ENABLE_MASK;
4711 mask &= ~(IXGBE_EIMS_OTHER |
4712 IXGBE_EIMS_MAILBOX |
4715 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
4718 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
4719 uint16_t queue_idx, uint16_t tx_rate)
4721 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4722 uint32_t rf_dec, rf_int;
4724 uint16_t link_speed = dev->data->dev_link.link_speed;
4726 if (queue_idx >= hw->mac.max_tx_queues)
4730 /* Calculate the rate factor values to set */
4731 rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
4732 rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
4733 rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
4735 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
4736 bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
4737 IXGBE_RTTBCNRC_RF_INT_MASK_M);
4738 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
4744 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
4745 * register: MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise the default MMW_SIZE=0x4.
4748 if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
4749 (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
4750 IXGBE_MAX_JUMBO_FRAME_SIZE))
4751 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
4752 IXGBE_MMW_SIZE_JUMBO_FRAME);
4754 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
4755 IXGBE_MMW_SIZE_DEFAULT);
4757 /* Set RTTBCNRC of queue X */
4758 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
4759 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
4760 IXGBE_WRITE_FLUSH(hw);
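/*
 * Worked example of the rate-factor encoding above: with link_speed =
 * 10000 (Mb/s) and tx_rate = 3000, rf_int = 3 and rf_dec =
 * (1000 << IXGBE_RTTBCNRC_RF_INT_SHIFT) / 3000 = 5461, which encodes
 * the fractional divisor 10000/3000 = 3.333 into RTTBCNRC.
 */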
4765 static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
4766 uint16_t tx_rate, uint64_t q_msk)
4768 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4769 struct ixgbe_vf_info *vfinfo =
4770 *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
4771 uint8_t nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
4772 uint32_t queue_stride =
4773 IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
4774 uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx;
4775 uint32_t queue_end = queue_idx + nb_q_per_pool - 1;
4776 uint16_t total_rate = 0;
4778 if (queue_end >= hw->mac.max_tx_queues)
4781 if (vfinfo != NULL) {
4782 for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) {
4785 for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
4787 total_rate += vfinfo[vf_idx].tx_rate[idx];
4792 /* Store tx_rate for this vf. */
4793 for (idx = 0; idx < nb_q_per_pool; idx++) {
4794 if (((uint64_t)0x1 << idx) & q_msk) {
4795 if (vfinfo[vf].tx_rate[idx] != tx_rate)
4796 vfinfo[vf].tx_rate[idx] = tx_rate;
4797 total_rate += tx_rate;
4801 if (total_rate > dev->data->dev_link.link_speed) {
4803 * Reset the stored TX rate of the VF if it would exceed the link speed
4806 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
4810 /* Set RTTBCNRC of each queue/pool for vf X */
4811 for (; queue_idx <= queue_end; queue_idx++) {
4813 ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
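/*
 * Usage sketch (hypothetical application code): cap queues 0 and 1 of
 * VF 2 at 1000 Mb/s each:
 *
 *	rte_eth_set_vf_rate_limit(port_id, 2, 1000, 0x3);
 */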
4821 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
4822 __attribute__((unused)) uint32_t index,
4823 __attribute__((unused)) uint32_t pool)
4825 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4829 * On an 82599 VF, adding the same MAC address again is not an
4830 * idempotent operation. Trap this case to avoid exhausting the [very
4831 * limited] set of PF resources used to store VF MAC addresses.
4833 if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
4835 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
4838 PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag);
4842 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
4844 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4845 struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
4846 struct ether_addr *mac_addr;
4851 * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
4852 * not support the deletion of a given MAC address.
4853 * Instead, it requires deleting all MAC addresses, then adding them
4854 * all back with the exception of the one to be deleted.
4856 (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);
4859 * Add back all MAC addresses, except for the deleted one
4860 * and the permanent MAC address.
4862 for (i = 0, mac_addr = dev->data->mac_addrs;
4863 i < hw->mac.num_rar_entries; i++, mac_addr++) {
4864 /* Skip the deleted MAC address */
4867 /* Skip NULL MAC addresses */
4868 if (is_zero_ether_addr(mac_addr))
4870 /* Skip the permanent MAC address */
4871 if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
4873 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
4876 "Adding again MAC address "
4877 "%02x:%02x:%02x:%02x:%02x:%02x failed "
4879 mac_addr->addr_bytes[0],
4880 mac_addr->addr_bytes[1],
4881 mac_addr->addr_bytes[2],
4882 mac_addr->addr_bytes[3],
4883 mac_addr->addr_bytes[4],
4884 mac_addr->addr_bytes[5],
4890 ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
4892 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4894 hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);
4897 #define MAC_TYPE_FILTER_SUP(type) do {\
4898 if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\
4899 (type) != ixgbe_mac_X550)\
4904 ixgbe_syn_filter_set(struct rte_eth_dev *dev,
4905 struct rte_eth_syn_filter *filter,
4908 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4911 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
4914 synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
4917 if (synqf & IXGBE_SYN_FILTER_ENABLE)
4919 synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
4920 IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
4922 if (filter->hig_pri)
4923 synqf |= IXGBE_SYN_FILTER_SYNQFP;
4925 synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
4927 if (!(synqf & IXGBE_SYN_FILTER_ENABLE))
4929 synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
4931 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
4932 IXGBE_WRITE_FLUSH(hw);
4937 ixgbe_syn_filter_get(struct rte_eth_dev *dev,
4938 struct rte_eth_syn_filter *filter)
4940 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4941 uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
4943 if (synqf & IXGBE_SYN_FILTER_ENABLE) {
4944 filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
4945 filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);
4952 ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
4953 enum rte_filter_op filter_op,
4956 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4959 MAC_TYPE_FILTER_SUP(hw->mac.type);
4961 if (filter_op == RTE_ETH_FILTER_NOP)
4965 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
4970 switch (filter_op) {
4971 case RTE_ETH_FILTER_ADD:
4972 ret = ixgbe_syn_filter_set(dev,
4973 (struct rte_eth_syn_filter *)arg,
4976 case RTE_ETH_FILTER_DELETE:
4977 ret = ixgbe_syn_filter_set(dev,
4978 (struct rte_eth_syn_filter *)arg,
4981 case RTE_ETH_FILTER_GET:
4982 ret = ixgbe_syn_filter_get(dev,
4983 (struct rte_eth_syn_filter *)arg);
4986 PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
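/*
 * Usage sketch (hypothetical application code): steer TCP SYN packets
 * to queue 5 through the generic filter API:
 *
 *	struct rte_eth_syn_filter syn = { .hig_pri = 1, .queue = 5 };
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_SYN,
 *				RTE_ETH_FILTER_ADD, &syn);
 */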
4995 static inline enum ixgbe_5tuple_protocol
4996 convert_protocol_type(uint8_t protocol_value)
4998 if (protocol_value == IPPROTO_TCP)
4999 return IXGBE_FILTER_PROTOCOL_TCP;
5000 else if (protocol_value == IPPROTO_UDP)
5001 return IXGBE_FILTER_PROTOCOL_UDP;
5002 else if (protocol_value == IPPROTO_SCTP)
5003 return IXGBE_FILTER_PROTOCOL_SCTP;
5005 return IXGBE_FILTER_PROTOCOL_NONE;
5009 * add a 5tuple filter
5012 * dev: Pointer to struct rte_eth_dev.
5013 * index: the index that the filter occupies.
5014 * filter: pointer to the filter that will be added.
5015 * rx_queue: the queue id the filter is assigned to.
5018 * - On success, zero.
5019 * - On failure, a negative value.
5022 ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
5023 struct ixgbe_5tuple_filter *filter)
5025 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5026 struct ixgbe_filter_info *filter_info =
5027 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5029 uint32_t ftqf, sdpqf;
5030 uint32_t l34timir = 0;
5031 uint8_t mask = 0xff;
5034 * look for an unused 5tuple filter index,
5035 * and insert the filter into the list.
5037 for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) {
5038 idx = i / (sizeof(uint32_t) * NBBY);
5039 shift = i % (sizeof(uint32_t) * NBBY);
5040 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
5041 filter_info->fivetuple_mask[idx] |= 1 << shift;
5043 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
5049 if (i >= IXGBE_MAX_FTQF_FILTERS) {
5050 PMD_DRV_LOG(ERR, "5tuple filters are full.");
5054 sdpqf = (uint32_t)(filter->filter_info.dst_port <<
5055 IXGBE_SDPQF_DSTPORT_SHIFT);
5056 sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
5058 ftqf = (uint32_t)(filter->filter_info.proto &
5059 IXGBE_FTQF_PROTOCOL_MASK);
5060 ftqf |= (uint32_t)((filter->filter_info.priority &
5061 IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
5062 if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
5063 mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
5064 if (filter->filter_info.dst_ip_mask == 0)
5065 mask &= IXGBE_FTQF_DEST_ADDR_MASK;
5066 if (filter->filter_info.src_port_mask == 0)
5067 mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
5068 if (filter->filter_info.dst_port_mask == 0)
5069 mask &= IXGBE_FTQF_DEST_PORT_MASK;
5070 if (filter->filter_info.proto_mask == 0)
5071 mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
5072 ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
5073 ftqf |= IXGBE_FTQF_POOL_MASK_EN;
5074 ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
5076 IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
5077 IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
5078 IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
5079 IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
5081 l34timir |= IXGBE_L34T_IMIR_RESERVE;
5082 l34timir |= (uint32_t)(filter->queue <<
5083 IXGBE_L34T_IMIR_QUEUE_SHIFT);
5084 IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
5089 * remove a 5tuple filter
5092 * dev: Pointer to struct rte_eth_dev.
5093 * filter: pointer to the filter that will be removed.
5096 ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
5097 struct ixgbe_5tuple_filter *filter)
5099 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5100 struct ixgbe_filter_info *filter_info =
5101 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5102 uint16_t index = filter->index;
5104 filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
5105 ~(1 << (index % (sizeof(uint32_t) * NBBY)));
5106 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
5109 IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
5110 IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
5111 IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
5112 IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
5113 IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
5117 ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
5119 struct ixgbe_hw *hw;
5120 uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
5122 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5124 if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
5127 /* refuse an MTU that requires scattered-packet support when this
5128 * feature has not been enabled before. */
5129 if (!dev->data->scattered_rx &&
5130 (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
5131 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
5135 * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
5136 * request of the version 2.0 of the mailbox API.
5137 * For now, use the IXGBE_VF_SET_LPE request of the version 1.0
5138 * of the mailbox API.
5139 * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers
5140 * prior to 3.11.33 which contains the following change:
5141 * "ixgbe: Enable jumbo frames support w/ SR-IOV"
5143 ixgbevf_rlpml_set_vf(hw, max_frame);
5145 /* update max frame size */
5146 dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
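/*
 * Usage sketch (hypothetical application code): max_frame above adds
 * the Ethernet header and CRC, so an MTU of 1500 is checked as a
 * 1500 + 14 + 4 = 1518 byte frame:
 *
 *	rte_eth_dev_set_mtu(port_id, 1500);
 */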
5150 #define MAC_TYPE_FILTER_SUP_EXT(type) do {\
5151 if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\
5155 static inline struct ixgbe_5tuple_filter *
5156 ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
5157 struct ixgbe_5tuple_filter_info *key)
5159 struct ixgbe_5tuple_filter *it;
5161 TAILQ_FOREACH(it, filter_list, entries) {
5162 if (memcmp(key, &it->filter_info,
5163 sizeof(struct ixgbe_5tuple_filter_info)) == 0) {
5170 /* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info */
5172 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
5173 struct ixgbe_5tuple_filter_info *filter_info)
5175 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
5176 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
5177 filter->priority < IXGBE_5TUPLE_MIN_PRI)
5180 switch (filter->dst_ip_mask) {
5182 filter_info->dst_ip_mask = 0;
5183 filter_info->dst_ip = filter->dst_ip;
5186 filter_info->dst_ip_mask = 1;
5189 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
5193 switch (filter->src_ip_mask) {
5195 filter_info->src_ip_mask = 0;
5196 filter_info->src_ip = filter->src_ip;
5199 filter_info->src_ip_mask = 1;
5202 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
5206 switch (filter->dst_port_mask) {
5208 filter_info->dst_port_mask = 0;
5209 filter_info->dst_port = filter->dst_port;
5212 filter_info->dst_port_mask = 1;
5215 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
5219 switch (filter->src_port_mask) {
5221 filter_info->src_port_mask = 0;
5222 filter_info->src_port = filter->src_port;
5225 filter_info->src_port_mask = 1;
5228 PMD_DRV_LOG(ERR, "invalid src_port mask.");
5232 switch (filter->proto_mask) {
5234 filter_info->proto_mask = 0;
5235 filter_info->proto =
5236 convert_protocol_type(filter->proto);
5239 filter_info->proto_mask = 1;
5242 PMD_DRV_LOG(ERR, "invalid protocol mask.");
5246 filter_info->priority = (uint8_t)filter->priority;
5251 * add or delete an ntuple filter
5254 * dev: Pointer to struct rte_eth_dev.
5255 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
5256 * add: if true, add the filter; if false, remove it
5259 * - On success, zero.
5260 * - On failure, a negative value.
5263 ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
5264 struct rte_eth_ntuple_filter *ntuple_filter,
5267 struct ixgbe_filter_info *filter_info =
5268 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5269 struct ixgbe_5tuple_filter_info filter_5tuple;
5270 struct ixgbe_5tuple_filter *filter;
5273 if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
5274 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
5278 memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
5279 ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
5283 filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
5285 if (filter != NULL && add) {
5286 PMD_DRV_LOG(ERR, "filter exists.");
5289 if (filter == NULL && !add) {
5290 PMD_DRV_LOG(ERR, "filter doesn't exist.");
5295 filter = rte_zmalloc("ixgbe_5tuple_filter",
5296 sizeof(struct ixgbe_5tuple_filter), 0);
5299 (void)rte_memcpy(&filter->filter_info,
5301 sizeof(struct ixgbe_5tuple_filter_info));
5302 filter->queue = ntuple_filter->queue;
5303 ret = ixgbe_add_5tuple_filter(dev, filter);
5309 ixgbe_remove_5tuple_filter(dev, filter);
5315 * get an ntuple filter
5318 * dev: Pointer to struct rte_eth_dev.
5319 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
5322 * - On success, zero.
5323 * - On failure, a negative value.
5326 ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
5327 struct rte_eth_ntuple_filter *ntuple_filter)
5329 struct ixgbe_filter_info *filter_info =
5330 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5331 struct ixgbe_5tuple_filter_info filter_5tuple;
5332 struct ixgbe_5tuple_filter *filter;
5335 if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
5336 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
5340 memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
5341 ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
5345 filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
5347 if (filter == NULL) {
5348 PMD_DRV_LOG(ERR, "filter doesn't exist.");
5351 ntuple_filter->queue = filter->queue;
5356 * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter.
5357 * @dev: pointer to rte_eth_dev structure
5358 * @filter_op: operation to be taken.
5359 * @arg: a pointer to specific structure corresponding to the filter_op
5362 * - On success, zero.
5363 * - On failure, a negative value.
5366 ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
5367 enum rte_filter_op filter_op,
5370 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5373 MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
5375 if (filter_op == RTE_ETH_FILTER_NOP)
5379 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
5384 switch (filter_op) {
5385 case RTE_ETH_FILTER_ADD:
5386 ret = ixgbe_add_del_ntuple_filter(dev,
5387 (struct rte_eth_ntuple_filter *)arg,
5390 case RTE_ETH_FILTER_DELETE:
5391 ret = ixgbe_add_del_ntuple_filter(dev,
5392 (struct rte_eth_ntuple_filter *)arg,
5395 case RTE_ETH_FILTER_GET:
5396 ret = ixgbe_get_ntuple_filter(dev,
5397 (struct rte_eth_ntuple_filter *)arg);
5400 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
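/*
 * Usage sketch (hypothetical application code): direct TCP traffic
 * destined to port 80 to queue 2 with a 5tuple (ntuple) filter:
 *
 *	struct rte_eth_ntuple_filter nf;
 *	memset(&nf, 0, sizeof(nf));
 *	nf.flags = RTE_5TUPLE_FLAGS;
 *	nf.proto = IPPROTO_TCP;
 *	nf.proto_mask = UINT8_MAX;
 *	nf.dst_port = rte_cpu_to_be_16(80);
 *	nf.dst_port_mask = UINT16_MAX;
 *	nf.priority = 1;
 *	nf.queue = 2;
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
 *				RTE_ETH_FILTER_ADD, &nf);
 */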
5408 ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
5413 for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
5414 if (filter_info->ethertype_filters[i] == ethertype &&
5415 (filter_info->ethertype_mask & (1 << i)))
5422 ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info,
5427 for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
5428 if (!(filter_info->ethertype_mask & (1 << i))) {
5429 filter_info->ethertype_mask |= 1 << i;
5430 filter_info->ethertype_filters[i] = ethertype;
5438 ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info,
5441 if (idx >= IXGBE_MAX_ETQF_FILTERS)
5443 filter_info->ethertype_mask &= ~(1 << idx);
5444 filter_info->ethertype_filters[idx] = 0;
5449 ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
5450 struct rte_eth_ethertype_filter *filter,
5453 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5454 struct ixgbe_filter_info *filter_info =
5455 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5460 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
5463 if (filter->ether_type == ETHER_TYPE_IPv4 ||
5464 filter->ether_type == ETHER_TYPE_IPv6) {
5465 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
5466 " ethertype filter.", filter->ether_type);
5470 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
5471 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
5474 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
5475 PMD_DRV_LOG(ERR, "drop option is unsupported.");
5479 ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
5480 if (ret >= 0 && add) {
5481 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
5482 filter->ether_type);
5485 if (ret < 0 && !add) {
5486 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
5487 filter->ether_type);
5492 ret = ixgbe_ethertype_filter_insert(filter_info,
5493 filter->ether_type);
5495 PMD_DRV_LOG(ERR, "ethertype filters are full.");
5498 etqf = IXGBE_ETQF_FILTER_EN;
5499 etqf |= (uint32_t)filter->ether_type;
5500 etqs |= (uint32_t)((filter->queue <<
5501 IXGBE_ETQS_RX_QUEUE_SHIFT) &
5502 IXGBE_ETQS_RX_QUEUE);
5503 etqs |= IXGBE_ETQS_QUEUE_EN;
5505 ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
5509 IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf);
5510 IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs);
5511 IXGBE_WRITE_FLUSH(hw);
5517 ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
5518 struct rte_eth_ethertype_filter *filter)
5520 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5521 struct ixgbe_filter_info *filter_info =
5522 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5523 uint32_t etqf, etqs;
5526 ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
5528 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
5529 filter->ether_type);
5533 etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret));
5534 if (etqf & IXGBE_ETQF_FILTER_EN) {
5535 etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret));
5536 filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE;
5538 filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >>
5539 IXGBE_ETQS_RX_QUEUE_SHIFT;
5546 * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter.
5547 * @dev: pointer to rte_eth_dev structure
5548 * @filter_op: operation to be taken.
5549 * @arg: a pointer to specific structure corresponding to the filter_op
5552 ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
5553 enum rte_filter_op filter_op,
5556 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5559 MAC_TYPE_FILTER_SUP(hw->mac.type);
5561 if (filter_op == RTE_ETH_FILTER_NOP)
5565 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
5570 switch (filter_op) {
5571 case RTE_ETH_FILTER_ADD:
5572 ret = ixgbe_add_del_ethertype_filter(dev,
5573 (struct rte_eth_ethertype_filter *)arg,
5576 case RTE_ETH_FILTER_DELETE:
5577 ret = ixgbe_add_del_ethertype_filter(dev,
5578 (struct rte_eth_ethertype_filter *)arg,
5581 case RTE_ETH_FILTER_GET:
5582 ret = ixgbe_get_ethertype_filter(dev,
5583 (struct rte_eth_ethertype_filter *)arg);
5586 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
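/*
 * Usage sketch (hypothetical application code): send LLDP frames
 * (EtherType 0x88CC) to queue 0; IPv4/IPv6 EtherTypes are rejected by
 * the checks above:
 *
 *	struct rte_eth_ethertype_filter ef;
 *	memset(&ef, 0, sizeof(ef));
 *	ef.ether_type = 0x88CC;
 *	ef.queue = 0;
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
 *				RTE_ETH_FILTER_ADD, &ef);
 */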
5594 ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
5595 enum rte_filter_type filter_type,
5596 enum rte_filter_op filter_op,
5601 switch (filter_type) {
5602 case RTE_ETH_FILTER_NTUPLE:
5603 ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg);
5605 case RTE_ETH_FILTER_ETHERTYPE:
5606 ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg);
5608 case RTE_ETH_FILTER_SYN:
5609 ret = ixgbe_syn_filter_handle(dev, filter_op, arg);
5611 case RTE_ETH_FILTER_FDIR:
5612 ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg);
5615 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
5624 ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw,
5625 u8 **mc_addr_ptr, u32 *vmdq)
5630 mc_addr = *mc_addr_ptr;
5631 *mc_addr_ptr = (mc_addr + sizeof(struct ether_addr));
5636 ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
5637 struct ether_addr *mc_addr_set,
5638 uint32_t nb_mc_addr)
5640 struct ixgbe_hw *hw;
5643 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5644 mc_addr_list = (u8 *)mc_addr_set;
5645 return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
5646 ixgbe_dev_addr_list_itr, TRUE);
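/*
 * Usage sketch (hypothetical application code): install two multicast
 * addresses in one call; the new list replaces any previous one:
 *
 *	struct ether_addr mc[2] = {
 *		{{0x01, 0x00, 0x5E, 0x00, 0x00, 0x01}},
 *		{{0x01, 0x00, 0x5E, 0x00, 0x00, 0xFB}},
 *	};
 *	rte_eth_dev_set_mc_addr_list(port_id, mc, 2);
 */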
5650 ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
5652 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5653 uint64_t systime_cycles;
5655 switch (hw->mac.type) {
5656 case ixgbe_mac_X550:
5657 /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */
5658 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
5659 systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
5663 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
5664 systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
5668 return systime_cycles;
5672 ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
5674 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5675 uint64_t rx_tstamp_cycles;
5677 switch (hw->mac.type) {
5678 case ixgbe_mac_X550:
5679 /* RXSTMPL stores ns and RXSTMPH stores seconds. */
5680 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
5681 rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
5685 /* RXSTMPL stores ns and RXSTMPH stores seconds. */
5686 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
5687 rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
5691 return rx_tstamp_cycles;
5695 ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
5697 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5698 uint64_t tx_tstamp_cycles;
5700 switch (hw->mac.type) {
5701 case ixgbe_mac_X550:
5702 /* TXSTMPL stores ns and TXSTMPH stores seconds. */
5703 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
5704 tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
5708 /* TXSTMPL stores ns and TXSTMPH stores seconds. */
5709 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
5710 tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
5714 return tx_tstamp_cycles;
5718 ixgbe_start_timecounters(struct rte_eth_dev *dev)
5720 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5721 struct ixgbe_adapter *adapter =
5722 (struct ixgbe_adapter *)dev->data->dev_private;
5723 struct rte_eth_link link;
5724 uint32_t incval = 0;
5727 /* Get current link speed. */
5728 memset(&link, 0, sizeof(link));
5729 ixgbe_dev_link_update(dev, 1);
5730 rte_ixgbe_dev_atomic_read_link_status(dev, &link);
5732 switch (link.link_speed) {
5733 case ETH_LINK_SPEED_100:
5734 incval = IXGBE_INCVAL_100;
5735 shift = IXGBE_INCVAL_SHIFT_100;
5737 case ETH_LINK_SPEED_1000:
5738 incval = IXGBE_INCVAL_1GB;
5739 shift = IXGBE_INCVAL_SHIFT_1GB;
5741 case ETH_LINK_SPEED_10000:
5743 incval = IXGBE_INCVAL_10GB;
5744 shift = IXGBE_INCVAL_SHIFT_10GB;
5748 switch (hw->mac.type) {
5749 case ixgbe_mac_X550:
5750 /* Independent of link speed. */
5752 /* Cycles read will be interpreted as ns. */
5755 case ixgbe_mac_X540:
5756 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
5758 case ixgbe_mac_82599EB:
5759 incval >>= IXGBE_INCVAL_SHIFT_82599;
5760 shift -= IXGBE_INCVAL_SHIFT_82599;
5761 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
5762 (1 << IXGBE_INCPER_SHIFT_82599) | incval);
5765 /* Not supported. */
5769 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
5770 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
5771 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
5773 adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
5774 adapter->systime_tc.cc_shift = shift;
5775 adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
5777 adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
5778 adapter->rx_tstamp_tc.cc_shift = shift;
5779 adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
5781 adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
5782 adapter->tx_tstamp_tc.cc_shift = shift;
5783 adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
5787 ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
5789 struct ixgbe_adapter *adapter =
5790 (struct ixgbe_adapter *)dev->data->dev_private;
5792 adapter->systime_tc.nsec += delta;
5793 adapter->rx_tstamp_tc.nsec += delta;
5794 adapter->tx_tstamp_tc.nsec += delta;
5800 ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
5803 struct ixgbe_adapter *adapter =
5804 (struct ixgbe_adapter *)dev->data->dev_private;
5806 ns = rte_timespec_to_ns(ts);
5807 /* Set the timecounters to a new value. */
5808 adapter->systime_tc.nsec = ns;
5809 adapter->rx_tstamp_tc.nsec = ns;
5810 adapter->tx_tstamp_tc.nsec = ns;
5816 ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
5818 uint64_t ns, systime_cycles;
5819 struct ixgbe_adapter *adapter =
5820 (struct ixgbe_adapter *)dev->data->dev_private;
5822 systime_cycles = ixgbe_read_systime_cyclecounter(dev);
5823 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
5824 *ts = rte_ns_to_timespec(ns);
5830 ixgbe_timesync_enable(struct rte_eth_dev *dev)
5832 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5836 /* Stop the timesync system time. */
5837 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0);
5838 /* Reset the timesync system time value. */
5839 IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0);
5840 IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0);
5842 /* Enable system time for platforms where it isn't on by default. */
5843 tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
5844 tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME;
5845 IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
5847 ixgbe_start_timecounters(dev);
5849 /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
5850 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
5852 IXGBE_ETQF_FILTER_EN |
5855 /* Enable timestamping of received PTP packets. */
5856 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
5857 tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED;
5858 IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
5860 /* Enable timestamping of transmitted PTP packets. */
5861 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
5862 tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED;
5863 IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
5865 IXGBE_WRITE_FLUSH(hw);
5871 ixgbe_timesync_disable(struct rte_eth_dev *dev)
5873 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5876 /* Disable timestamping of transmitted PTP packets. */
5877 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
5878 tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED;
5879 IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
5881 /* Disable timestamping of received PTP packets. */
5882 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
5883 tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED;
5884 IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
5886 /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
5887 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
5889 /* Stop incrementing the System Time registers. */
5890 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);
5896 ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
5897 struct timespec *timestamp,
5898 uint32_t flags __rte_unused)
5900 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5901 struct ixgbe_adapter *adapter =
5902 (struct ixgbe_adapter *)dev->data->dev_private;
5903 uint32_t tsync_rxctl;
5904 uint64_t rx_tstamp_cycles;
5907 tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
5908 if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0)
5911 rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev);
5912 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
5913 *timestamp = rte_ns_to_timespec(ns);
5919 ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
5920 struct timespec *timestamp)
5922 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5923 struct ixgbe_adapter *adapter =
5924 (struct ixgbe_adapter *)dev->data->dev_private;
5925 uint32_t tsync_txctl;
5926 uint64_t tx_tstamp_cycles;
5929 tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
5930 if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0)
5933 tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev);
5934 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
5935 *timestamp = rte_ns_to_timespec(ns);
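/*
 * Usage sketch (hypothetical application code): enable IEEE 1588
 * timestamping and poll for the timestamp of a transmitted PTP frame:
 *
 *	struct timespec ts;
 *	rte_eth_timesync_enable(port_id);
 *	[transmit a PTP packet with the PKT_TX_IEEE1588_TMST flag set]
 *	while (rte_eth_timesync_read_tx_timestamp(port_id, &ts) < 0)
 *		rte_delay_us(10);
 */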
5941 ixgbe_get_reg_length(struct rte_eth_dev *dev)
5943 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5946 const struct reg_info *reg_group;
5947 const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
5948 ixgbe_regs_mac_82598EB : ixgbe_regs_others;
5950 while ((reg_group = reg_set[g_ind++]))
5951 count += ixgbe_regs_group_count(reg_group);
5957 ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
5961 const struct reg_info *reg_group;
5963 while ((reg_group = ixgbevf_regs[g_ind++]))
5964 count += ixgbe_regs_group_count(reg_group);
5970 ixgbe_get_regs(struct rte_eth_dev *dev,
5971 struct rte_dev_reg_info *regs)
5973 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5974 uint32_t *data = regs->data;
5977 const struct reg_info *reg_group;
5978 const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
5979 ixgbe_regs_mac_82598EB : ixgbe_regs_others;
5981 /* Support only full register dump */
5982 if ((regs->length == 0) ||
5983 (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) {
5984 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
5986 while ((reg_group = reg_set[g_ind++]))
5987 count += ixgbe_read_regs_group(dev, &data[count],
5996 ixgbevf_get_regs(struct rte_eth_dev *dev,
5997 struct rte_dev_reg_info *regs)
5999 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6000 uint32_t *data = regs->data;
6003 const struct reg_info *reg_group;
6005 /* Support only full register dump */
6006 if ((regs->length == 0) ||
6007 (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) {
6008 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
6010 while ((reg_group = ixgbevf_regs[g_ind++]))
6011 count += ixgbe_read_regs_group(dev, &data[count],
6020 ixgbe_get_eeprom_length(struct rte_eth_dev *dev)
6022 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6024 /* Return unit is byte count */
6025 return hw->eeprom.word_size * 2;
6029 ixgbe_get_eeprom(struct rte_eth_dev *dev,
6030 struct rte_dev_eeprom_info *in_eeprom)
6032 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6033 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
6034 uint16_t *data = in_eeprom->data;
6037 first = in_eeprom->offset >> 1;
6038 length = in_eeprom->length >> 1;
6039 if ((first > hw->eeprom.word_size) ||
6040 ((first + length) > hw->eeprom.word_size))
6043 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
6045 return eeprom->ops.read_buffer(hw, first, length, data);
6049 ixgbe_set_eeprom(struct rte_eth_dev *dev,
6050 struct rte_dev_eeprom_info *in_eeprom)
6052 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6053 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
6054 uint16_t *data = in_eeprom->data;
6057 first = in_eeprom->offset >> 1;
6058 length = in_eeprom->length >> 1;
6059 if ((first > hw->eeprom.word_size) ||
6060 ((first + length) > hw->eeprom.word_size))
6063 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
6065 return eeprom->ops.write_buffer(hw, first, length, data);
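/*
 * Usage sketch (hypothetical application code): read the first 32
 * bytes of the EEPROM; offset/length are in bytes and are halved into
 * words above:
 *
 *	uint16_t buf[16];
 *	struct rte_dev_eeprom_info info = {
 *		.data = buf, .offset = 0, .length = sizeof(buf),
 *	};
 *	rte_eth_dev_get_eeprom(port_id, &info);
 */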
6069 ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
6071 case ixgbe_mac_X550:
6072 case ixgbe_mac_X550EM_x:
6073 return ETH_RSS_RETA_SIZE_512;
6074 case ixgbe_mac_X550_vf:
6075 case ixgbe_mac_X550EM_x_vf:
6076 return ETH_RSS_RETA_SIZE_64;
6078 return ETH_RSS_RETA_SIZE_128;
6083 ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) {
6085 case ixgbe_mac_X550:
6086 case ixgbe_mac_X550EM_x:
6087 if (reta_idx < ETH_RSS_RETA_SIZE_128)
6088 return IXGBE_RETA(reta_idx >> 2);
6090 return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
6091 case ixgbe_mac_X550_vf:
6092 case ixgbe_mac_X550EM_x_vf:
6093 return IXGBE_VFRETA(reta_idx >> 2);
6095 return IXGBE_RETA(reta_idx >> 2);
6100 ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) {
6102 case ixgbe_mac_X550_vf:
6103 case ixgbe_mac_X550EM_x_vf:
6104 return IXGBE_VFMRQC;
6111 ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) {
6113 case ixgbe_mac_X550_vf:
6114 case ixgbe_mac_X550EM_x_vf:
6115 return IXGBE_VFRSSRK(i);
6117 return IXGBE_RSSRK(i);
6122 ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) {
6124 case ixgbe_mac_82599_vf:
6125 case ixgbe_mac_X540_vf:
6133 ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
6134 struct rte_eth_dcb_info *dcb_info)
6136 struct ixgbe_dcb_config *dcb_config =
6137 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
6138 struct ixgbe_dcb_tc_config *tc;
6141 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
6142 dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
6144 dcb_info->nb_tcs = 1;
6146 if (dcb_config->vt_mode) { /* vt is enabled */
6147 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
6148 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
6149 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
6150 dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
6151 for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
6152 for (j = 0; j < dcb_info->nb_tcs; j++) {
6153 dcb_info->tc_queue.tc_rxq[i][j].base =
6154 i * dcb_info->nb_tcs + j;
6155 dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1;
6156 dcb_info->tc_queue.tc_txq[i][j].base =
6157 i * dcb_info->nb_tcs + j;
6158 dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1;
6161 } else { /* vt is disabled */
6162 struct rte_eth_dcb_rx_conf *rx_conf =
6163 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
6164 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
6165 dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
6166 if (dcb_info->nb_tcs == ETH_4_TCS) {
6167 for (i = 0; i < dcb_info->nb_tcs; i++) {
6168 dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
6169 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
6171 dcb_info->tc_queue.tc_txq[0][0].base = 0;
6172 dcb_info->tc_queue.tc_txq[0][1].base = 64;
6173 dcb_info->tc_queue.tc_txq[0][2].base = 96;
6174 dcb_info->tc_queue.tc_txq[0][3].base = 112;
6175 dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
6176 dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
6177 dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
6178 dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
6179 } else if (dcb_info->nb_tcs == ETH_8_TCS) {
6180 for (i = 0; i < dcb_info->nb_tcs; i++) {
6181 dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
6182 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
6184 dcb_info->tc_queue.tc_txq[0][0].base = 0;
6185 dcb_info->tc_queue.tc_txq[0][1].base = 32;
6186 dcb_info->tc_queue.tc_txq[0][2].base = 64;
6187 dcb_info->tc_queue.tc_txq[0][3].base = 80;
6188 dcb_info->tc_queue.tc_txq[0][4].base = 96;
6189 dcb_info->tc_queue.tc_txq[0][5].base = 104;
6190 dcb_info->tc_queue.tc_txq[0][6].base = 112;
6191 dcb_info->tc_queue.tc_txq[0][7].base = 120;
6192 dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
6193 dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
6194 dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
6195 dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
6196 dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
6197 dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
6198 dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
6199 dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
6202 for (i = 0; i < dcb_info->nb_tcs; i++) {
6203 tc = &dcb_config->tc_config[i];
6204 dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent;
6209 static struct rte_driver rte_ixgbe_driver = {
6211 .init = rte_ixgbe_pmd_init,
6214 static struct rte_driver rte_ixgbevf_driver = {
6216 .init = rte_ixgbevf_pmd_init,
6219 PMD_REGISTER_DRIVER(rte_ixgbe_driver);
6220 PMD_REGISTER_DRIVER(rte_ixgbevf_driver);