/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_memcpy.h>
#include <rte_malloc.h>
#include <rte_random.h>

#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "rte_pmd_ixgbe.h"

#define IXGBE_MAX_VFTA     (128)
#define IXGBE_VF_MSG_SIZE_DEFAULT 1
#define IXGBE_VF_GET_QUEUE_MSG_SIZE 5
#define IXGBE_ETHERTYPE_FLOW_CTRL 0x8808
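
/*
 * 0x8808 is the IEEE 802.3x flow control (pause frame) ethertype; a Tx
 * anti-spoof filter on it is installed below so VFs cannot forge pause
 * frames.  The two *_MSG_SIZE constants are mailbox message lengths in
 * 32-bit words.
 */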

static inline uint16_t
dev_num_vf(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	return pci_dev->max_vfs;
}

static inline int
ixgbe_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
{
	unsigned char vf_mac_addr[RTE_ETHER_ADDR_LEN];
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
	uint16_t vfn;

	for (vfn = 0; vfn < vf_num; vfn++) {
		rte_eth_random_addr(vf_mac_addr);
		/* keep the random address as default */
		memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,
		       RTE_ETHER_ADDR_LEN);
	}

	return 0;
}

static inline int
ixgbe_mb_intr_setup(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	intr->mask |= IXGBE_EICR_MAILBOX;

	return 0;
}
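
/*
 * Carve the device into VMDq pools for SR-IOV.  The pool mode follows
 * from the VF count, and the queue count per pool follows from the pool
 * mode: 32-63 VFs -> 64 pools of 2 queues, 16-31 VFs -> 32 pools of
 * 4 queues, 1-15 VFs -> 16 pools of 8 queues.  The PF takes the pool
 * right after the last VF (def_vmdq_idx = vf_num), so its first queue
 * index is vf_num * nb_queue.
 */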
int ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_vf_info **vfinfo =
		IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
	struct ixgbe_mirror_info *mirror_info =
		IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private);
	struct ixgbe_uta_info *uta_info =
		IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	uint16_t vf_num;
	uint8_t nb_queue;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();

	RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
	vf_num = dev_num_vf(eth_dev);
	if (vf_num == 0)
		return ret;

	*vfinfo = rte_zmalloc("vf_info",
		sizeof(struct ixgbe_vf_info) * vf_num, 0);
	if (*vfinfo == NULL)
		rte_panic("Cannot allocate memory for private VF data\n");

	ret = rte_eth_switch_domain_alloc(&(*vfinfo)->switch_domain_id);
	if (ret) {
		PMD_INIT_LOG(ERR,
			"failed to allocate switch domain for device %d", ret);
		rte_free(*vfinfo);
		*vfinfo = NULL;
		return ret;
	}

	memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info));
	memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
	hw->mac.mc_filter_type = 0;

	if (vf_num >= ETH_32_POOLS) {
		nb_queue = 2;
		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
	} else if (vf_num >= ETH_16_POOLS) {
		nb_queue = 4;
		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
	} else {
		nb_queue = 8;
		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
	}

	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx =
		(uint16_t)(vf_num * nb_queue);

	ixgbe_vf_perm_addr_gen(eth_dev, vf_num);

	/* init_mailbox_params */
	hw->mbx.ops.init_params(hw);

	/* set mb interrupt mask */
	ixgbe_mb_intr_setup(eth_dev);

	return ret;
}

void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_vf_info **vfinfo;
	uint16_t vf_num;
	int ret;

	PMD_INIT_FUNC_TRACE();

	RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = 0;
	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = 0;
	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = 0;

	vf_num = dev_num_vf(eth_dev);
	if (vf_num == 0)
		return;

	vfinfo = IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
	if (*vfinfo == NULL)
		return;

	ret = rte_eth_switch_domain_free((*vfinfo)->switch_domain_id);
	if (ret)
		PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);

	rte_free(*vfinfo);
	*vfinfo = NULL;
}
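
/*
 * Install an ethertype filter matching IEEE 802.3x pause frames (0x8808)
 * with the Tx anti-spoof action, then enable ethertype anti-spoofing for
 * every VF, so pause frames forged by a VF are dropped instead of
 * transmitted.
 */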
static void
ixgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	uint16_t vf_num;
	int i;
	struct ixgbe_ethertype_filter ethertype_filter;

	if (!hw->mac.ops.set_ethertype_anti_spoofing) {
		PMD_DRV_LOG(INFO, "ether type anti-spoofing is not supported.\n");
		return;
	}

	i = ixgbe_ethertype_filter_lookup(filter_info,
					  IXGBE_ETHERTYPE_FLOW_CTRL);
	if (i >= 0) {
		PMD_DRV_LOG(ERR, "An ether type filter entity for flow control already exists!\n");
		return;
	}

	ethertype_filter.ethertype = IXGBE_ETHERTYPE_FLOW_CTRL;
	ethertype_filter.etqf = IXGBE_ETQF_FILTER_EN |
				IXGBE_ETQF_TX_ANTISPOOF |
				IXGBE_ETHERTYPE_FLOW_CTRL;
	ethertype_filter.etqs = 0;
	ethertype_filter.conf = TRUE;
	i = ixgbe_ethertype_filter_insert(filter_info,
					  &ethertype_filter);
	if (i < 0) {
		PMD_DRV_LOG(ERR, "Cannot find an unused ether type filter entity for flow control.\n");
		return;
	}

	IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
			(IXGBE_ETQF_FILTER_EN |
			IXGBE_ETQF_TX_ANTISPOOF |
			IXGBE_ETHERTYPE_FLOW_CTRL));

	vf_num = dev_num_vf(eth_dev);
	for (i = 0; i < vf_num; i++)
		hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);
}
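
/*
 * VFRE/VFTE each split the 64 pools across two 32-bit registers.  In
 * ixgbe_pf_host_configure below, only the PF pools are enabled, e.g.
 * vf_num = 40 gives vfre_slot = 1 and vfre_offset = 8, so VFRE[1] =
 * 0xffffff00 (pools 40-63 for the PF) and VFRE[0] = 0; vf_num = 16
 * gives vfre_slot = 0, so VFRE[0] = 0xffff0000 and VFRE[1] = ~0U
 * (vfre_slot - 1 wraps to all ones).  Each VF's own bit is set later,
 * in ixgbe_vf_reset_msg().
 */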
int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
{
	uint32_t vtctl, fcrth;
	uint32_t vfre_slot, vfre_offset;
	uint16_t vf_num;
	const uint8_t VFRE_SHIFT = 5;  /* VFRE 32 bits per slot */
	const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	uint32_t gpie, gcr_ext;
	uint32_t vlanctrl;
	int i;

	vf_num = dev_num_vf(eth_dev);
	if (vf_num == 0)
		return -1;

	/* enable VMDq and set the default pool for PF */
	vtctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	vtctl |= IXGBE_VMD_CTL_VMDQ_EN;
	vtctl &= ~IXGBE_VT_CTL_POOL_MASK;
	vtctl |= RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx
		<< IXGBE_VT_CTL_POOL_SHIFT;
	vtctl |= IXGBE_VT_CTL_REPLEN;
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);

	vfre_offset = vf_num & VFRE_MASK;
	vfre_slot = (vf_num >> VFRE_SHIFT) > 0 ? 1 : 0;

	/* Enable pools reserved to PF only */
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot), (~0U) << vfre_offset);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot ^ 1), vfre_slot - 1);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot), (~0U) << vfre_offset);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot ^ 1), vfre_slot - 1);

	/* PFDMA Tx General Switch Control Enables VMDQ loopback */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	/* clear VMDq map to permanent rar 0 */
	hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);

	/* clear VMDq map to scan rar 127 */
	IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(hw->mac.num_rar_entries), 0);
	IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(hw->mac.num_rar_entries), 0);

	/* set VMDq map to default PF pool */
	hw->mac.ops.set_vmdq(hw, 0, RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx);

	/*
	 * SW must set GCR_EXT.VT_Mode the same as GPIE.VT_Mode
	 */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;

	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT;

	switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
	case ETH_64_POOLS:
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		gpie |= IXGBE_GPIE_VTMODE_64;
		break;
	case ETH_32_POOLS:
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		gpie |= IXGBE_GPIE_VTMODE_32;
		break;
	case ETH_16_POOLS:
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_16;
		gpie |= IXGBE_GPIE_VTMODE_16;
		break;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/*
	 * enable vlan filtering and allow all vlan tags through
	 */
	vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);

	/* VFTA - enable all vlan filters */
	for (i = 0; i < IXGBE_MAX_VFTA; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);

	/* Enable MAC Anti-Spoofing */
	hw->mac.ops.set_mac_anti_spoofing(hw, FALSE, vf_num);

	/* set flow control threshold to max to avoid tx switch hang */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
		fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
	}

	ixgbe_add_tx_flow_control_drop_filter(eth_dev);

	return 0;
}
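
/*
 * Mirror the PF's promiscuous/all-multicast state into the global FCTRL
 * register and the PF pool's VMOLR (pool index vfn = dev_num_vf(), the
 * pool right after the last VF); unrelated VMOLR bits are preserved by
 * the read-modify-write below.
 */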
static void
set_rx_mode(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *dev_data = dev->data;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
	uint16_t vfn = dev_num_vf(dev);

	/* Check for Promiscuous and All Multicast modes */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);

	/* set all bits that we expect to always be set */
	fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
	fctrl |= IXGBE_FCTRL_BAM;

	/* clear the bits we are changing the status of */
	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	if (dev_data->promiscuous) {
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
	} else {
		if (dev_data->all_multicast) {
			fctrl |= IXGBE_FCTRL_MPE;
			vmolr |= IXGBE_VMOLR_MPE;
		} else {
			vmolr |= IXGBE_VMOLR_ROMPE;
		}
	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(vfn)) &
			 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
			   IXGBE_VMOLR_ROPE);
		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vfn), vmolr);
	}

	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	ixgbe_vlan_hw_strip_config(dev);
}

static inline void
ixgbe_vf_reset_event(struct rte_eth_dev *dev, uint16_t vf)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
	uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	vmolr |= (IXGBE_VMOLR_ROPE |
			IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);

	/* reset multicast table array for vf */
	vfinfo[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	set_rx_mode(dev);

	hw->mac.ops.clear_rar(hw, rar_entry);
}
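
/*
 * Re-arm a VF after a function-level reset: set its bit (vf % 32) in
 * VFTE/VFRE[vf / 32] and program queue-drop enable one queue at a time
 * through the QDE index field, e.g. vf = 3 with 2 queues per pool
 * touches queues 6 and 7.
 */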
static inline void
ixgbe_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;
	uint32_t reg_offset, vf_shift;
	const uint8_t VFRE_SHIFT = 5;  /* VFRE 32 bits per slot */
	const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
	uint8_t nb_q_per_pool;
	int i;

	vf_shift = vf & VFRE_MASK;
	reg_offset = (vf >> VFRE_SHIFT) > 0 ? 1 : 0;

	/* enable transmit for vf */
	reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
	reg |= (1 << vf_shift);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);

	/* enable all queue drop for IOV */
	nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
	for (i = vf * nb_q_per_pool; i < (vf + 1) * nb_q_per_pool; i++) {
		IXGBE_WRITE_FLUSH(hw);
		reg = IXGBE_QDE_ENABLE | IXGBE_QDE_WRITE;
		reg |= i << IXGBE_QDE_IDX_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
	}

	/* enable receive for vf */
	reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
	reg |= (1 << vf_shift);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);

	/* Enable counting of spoofed packets in the SSVPC register */
	reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
	reg |= (1 << vf_shift);
	IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);

	ixgbe_vf_reset_event(dev, vf);
}

static int
ixgbe_disable_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	PMD_DRV_LOG(INFO, "VF %u: disabling multicast promiscuous\n", vf);

	vmolr &= ~IXGBE_VMOLR_MPE;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	return 0;
}
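
/*
 * IXGBE_VF_RESET handler: re-enable the VF's queues, restore its
 * permanent MAC into the RAR, and reply with IXGBE_VF_PERMADDR_MSG_LEN
 * words: word 0 is IXGBE_VF_RESET | ACK, words 1-2 carry the MAC
 * address, and word 3 the multicast filter type the VF needs to build
 * MTA-compatible hashes.
 */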
static int
ixgbe_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	unsigned char *vf_mac = vfinfo[vf].vf_mac_addresses;
	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);

	ixgbe_vf_reset_msg(dev, vf);

	hw->mac.ops.set_rar(hw, rar_entry, vf_mac, vf, IXGBE_RAH_AV);

	/* Disable multicast promiscuous at reset */
	ixgbe_disable_vf_mc_promisc(dev, vf);

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
	rte_memcpy(new_mac, vf_mac, RTE_ETHER_ADDR_LEN);
	/*
	 * Piggyback the multicast filter type so VF can compute the
	 * correct vectors
	 */
	msgbuf[3] = hw->mac.mc_filter_type;
	ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);

	return 0;
}

static int
ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);

	if (rte_is_valid_assigned_ether_addr(
			(struct rte_ether_addr *)new_mac)) {
		rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
			   RTE_ETHER_ADDR_LEN);
		return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
					   IXGBE_RAH_AV);
	}
	return -1;
}

static int
ixgbe_vf_set_multicast(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	int nb_entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
		IXGBE_VT_MSGINFO_SHIFT;
	uint16_t *hash_list = (uint16_t *)&msgbuf[1];
	uint32_t mta_idx;
	uint32_t mta_shift;
	const uint32_t IXGBE_MTA_INDEX_MASK = 0x7F;
	const uint32_t IXGBE_MTA_BIT_SHIFT = 5;
	const uint32_t IXGBE_MTA_BIT_MASK = (0x1 << IXGBE_MTA_BIT_SHIFT) - 1;
	uint32_t reg_val;
	int i;
	u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	/* Disable multicast promiscuous first */
	ixgbe_disable_vf_mc_promisc(dev, vf);

	/* only so many hash values supported */
	nb_entries = RTE_MIN(nb_entries, IXGBE_MAX_VF_MC_ENTRIES);

	/* store the mc entries for this VF */
	vfinfo[vf].num_vf_mc_hashes = (uint16_t)nb_entries;
	for (i = 0; i < nb_entries; i++)
		vfinfo[vf].vf_mc_hashes[i] = hash_list[i];

	if (nb_entries == 0) {
		vmolr &= ~IXGBE_VMOLR_ROMPE;
		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
		return 0;
	}
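
	/*
	 * Each 12-bit hash picks one bit in the 128x32 MTA: bits 11:5 select
	 * the register, bits 4:0 the bit within it, e.g. hash 0xabc sets
	 * bit 28 of MTA[0x55].
	 */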
	for (i = 0; i < vfinfo[vf].num_vf_mc_hashes; i++) {
		mta_idx = (vfinfo[vf].vf_mc_hashes[i] >> IXGBE_MTA_BIT_SHIFT)
				& IXGBE_MTA_INDEX_MASK;
		mta_shift = vfinfo[vf].vf_mc_hashes[i] & IXGBE_MTA_BIT_MASK;
		reg_val = IXGBE_READ_REG(hw, IXGBE_MTA(mta_idx));
		reg_val |= (1 << mta_shift);
		IXGBE_WRITE_REG(hw, IXGBE_MTA(mta_idx), reg_val);
	}

	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	return 0;
}

static int
ixgbe_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	int add, vid;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));

	add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
		>> IXGBE_VT_MSGINFO_SHIFT;
	vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);

	if (add)
		vfinfo[vf].vlan_count++;
	else if (vfinfo[vf].vlan_count)
		vfinfo[vf].vlan_count--;
	return hw->mac.ops.set_vfta(hw, vid, vf, (bool)add, false);
}
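
/*
 * IXGBE_VF_SET_LPE handler: the VF asks the PF to grow the device-wide
 * max receive frame size (MAXFRS).  Only X540/X550-class MACs allow this
 * in IOV mode, and the size is only ever raised, never lowered, because
 * other pools may already depend on the larger value.
 */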
static int
ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __rte_unused uint32_t vf,
		uint32_t *msgbuf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t new_mtu = msgbuf[1];
	uint32_t max_frs;
	uint32_t hlreg0;
	int max_frame = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	/* X540 and X550 support jumbo frames in IOV mode */
	if (hw->mac.type != ixgbe_mac_X540 &&
	    hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a)
		return -1;

	if (max_frame < RTE_ETHER_MIN_LEN ||
	    max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
		return -1;

	max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
		   IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
	if (max_frs < new_mtu) {
		hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		if (new_mtu > RTE_ETHER_MAX_LEN) {
			dev->data->dev_conf.rxmode.offloads |=
				DEV_RX_OFFLOAD_JUMBO_FRAME;
			hlreg0 |= IXGBE_HLREG0_JUMBOEN;
		} else {
			dev->data->dev_conf.rxmode.offloads &=
				~DEV_RX_OFFLOAD_JUMBO_FRAME;
			hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
		}
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);

		max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
	}

	return 0;
}

static int
ixgbe_negotiate_vf_api(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	uint32_t api_version = msgbuf[1];
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);

	switch (api_version) {
	case ixgbe_mbox_api_10:
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
		vfinfo[vf].api_version = (uint8_t)api_version;
		return 0;
	default:
		break;
	}

	PMD_DRV_LOG(ERR, "Negotiate invalid api version %u from VF %d\n",
		api_version, vf);

	return -1;
}
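
/*
 * IXGBE_VF_GET_QUEUES handler: fills a 5-word reply
 * (IXGBE_VF_GET_QUEUE_MSG_SIZE) with the VF's Rx/Tx queue counts, its
 * default queue index, and the number of transparent-VLAN/DCB traffic
 * classes implied by the PF's Tx multi-queue mode.
 */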
static int
ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
	uint32_t default_q = vf * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
	struct rte_eth_conf *eth_conf;
	struct rte_eth_vmdq_dcb_tx_conf *vmdq_dcb_tx_conf;
	u8 num_tcs;
	struct ixgbe_hw *hw;
	u32 vmvir;
#define IXGBE_VMVIR_VLANA_MASK		0xC0000000
#define IXGBE_VMVIR_VLAN_VID_MASK	0x00000FFF
#define IXGBE_VMVIR_VLAN_UP_MASK	0x0000E000
#define VLAN_PRIO_SHIFT			13
	u32 vlana;
	u32 vid;
	u32 user_priority;

	/* Verify if the PF supports the mbox APIs version or not */
	switch (vfinfo[vf].api_version) {
	case ixgbe_mbox_api_20:
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
		break;
	default:
		return -1;
	}

	/* Notify VF of Rx and Tx queue number */
	msgbuf[IXGBE_VF_RX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
	msgbuf[IXGBE_VF_TX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;

	/* Notify VF of default queue */
	msgbuf[IXGBE_VF_DEF_QUEUE] = default_q;

	/* Notify VF of number of DCB traffic classes */
	eth_conf = &dev->data->dev_conf;
	switch (eth_conf->txmode.mq_mode) {
	case ETH_MQ_TX_NONE:
	case ETH_MQ_TX_DCB:
		PMD_DRV_LOG(ERR, "PF must work with virtualization for VF %u"
			", but its tx mode = %d\n", vf,
			eth_conf->txmode.mq_mode);
		return -1;

	case ETH_MQ_TX_VMDQ_DCB:
		vmdq_dcb_tx_conf = &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
		switch (vmdq_dcb_tx_conf->nb_queue_pools) {
		case ETH_16_POOLS:
			num_tcs = ETH_8_TCS;
			break;
		case ETH_32_POOLS:
			num_tcs = ETH_4_TCS;
			break;
		default:
			return -1;
		}
		break;

	/* ETH_MQ_TX_VMDQ_ONLY, DCB not enabled */
	case ETH_MQ_TX_VMDQ_ONLY:
		hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
		vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
		vlana = vmvir & IXGBE_VMVIR_VLANA_MASK;
		vid = vmvir & IXGBE_VMVIR_VLAN_VID_MASK;
		user_priority =
			(vmvir & IXGBE_VMVIR_VLAN_UP_MASK) >> VLAN_PRIO_SHIFT;
		if ((vlana == IXGBE_VMVIR_VLANA_DEFAULT) &&
		    ((vid != 0) || (user_priority != 0)))
			num_tcs = 1;
		else
			num_tcs = 0;
		break;

	default:
		PMD_DRV_LOG(ERR, "PF works with invalid mode = %d\n",
			eth_conf->txmode.mq_mode);
		return -1;
	}
	msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;

	return 0;
}
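
/*
 * IXGBE_VF_UPDATE_XCAST_MODE handler: each mode is a superset of the
 * previous one (NONE < MULTI < ALLMULTI < PROMISC).  Promiscuous mode
 * additionally needs mailbox API 1.3, an X540/X550-class MAC, and the
 * PF itself in promiscuous mode.
 */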
static int
ixgbe_set_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int xcast_mode = msgbuf[1];	/* msgbuf contains the flag to enable */
	u32 vmolr, fctrl, disable, enable;

	switch (vfinfo[vf].api_version) {
	case ixgbe_mbox_api_12:
		/* promisc introduced in 1.3 version */
		if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
			return -EOPNOTSUPP;
		break;
	case ixgbe_mbox_api_13:
		break;
	default:
		return -1;
	}

	if (vfinfo[vf].xcast_mode == xcast_mode)
		goto out;

	switch (xcast_mode) {
	case IXGBEVF_XCAST_MODE_NONE:
		disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
			  IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
		enable = 0;
		break;
	case IXGBEVF_XCAST_MODE_MULTI:
		disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE;
		break;
	case IXGBEVF_XCAST_MODE_ALLMULTI:
		disable = IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
		break;
	case IXGBEVF_XCAST_MODE_PROMISC:
		if (hw->mac.type <= ixgbe_mac_82599EB)
			return -1;

		fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
		if (!(fctrl & IXGBE_FCTRL_UPE)) {
			/* VF promisc requires PF in promisc */
			PMD_DRV_LOG(ERR,
				"Enabling VF promisc requires PF in promisc\n");
			return -1;
		}

		disable = 0;
		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
			 IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
		break;
	default:
		return -1;
	}

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
	vmolr &= ~disable;
	vmolr |= enable;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
	vfinfo[vf].xcast_mode = xcast_mode;

out:
	msgbuf[1] = xcast_mode;

	return 0;
}

static int
ixgbe_set_vf_macvlan_msg(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vf_info =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
	int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
		    IXGBE_VT_MSGINFO_SHIFT;

	if (index) {
		if (!rte_is_valid_assigned_ether_addr(
				(struct rte_ether_addr *)new_mac)) {
			PMD_DRV_LOG(ERR, "set invalid mac vf:%d\n", vf);
			return -1;
		}

		vf_info[vf].mac_count++;

		hw->mac.ops.set_rar(hw, vf_info[vf].mac_count,
				new_mac, vf, IXGBE_RAH_AV);
	} else {
		if (vf_info[vf].mac_count) {
			hw->mac.ops.clear_rar(hw, vf_info[vf].mac_count);
			vf_info[vf].mac_count = 0;
		}
	}
	return 0;
}
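
/*
 * Mailbox dispatcher: word 0 of a VF message carries the opcode in the
 * low 16 bits and IXGBE_VT_MSGTYPE_* flags in the high bits.  The PF
 * first consults the application through RTE_ETH_EVENT_VF_MBOX, then
 * echoes word 0 back with ACK or NACK plus CTS set.
 */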
static int
ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
{
	uint16_t mbx_size = IXGBE_VFMAILBOX_SIZE;
	uint16_t msg_size = IXGBE_VF_MSG_SIZE_DEFAULT;
	uint32_t msgbuf[IXGBE_VFMAILBOX_SIZE];
	int32_t retval;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
	struct rte_pmd_ixgbe_mb_event_param ret_param;

	retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
	if (retval) {
		PMD_DRV_LOG(ERR, "Error mbx recv msg from VF %d", vf);
		return retval;
	}

	/* ignore a message that has already been processed */
	if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
		return retval;

	/* flush the ack before we write any messages back */
	IXGBE_WRITE_FLUSH(hw);

	/**
	 * initialise structure to send to user application
	 * will return response from user in retval field
	 */
	ret_param.retval = RTE_PMD_IXGBE_MB_EVENT_PROCEED;
	ret_param.vfid = vf;
	ret_param.msg_type = msgbuf[0] & 0xFFFF;
	ret_param.msg = (void *)msgbuf;

	/* perform VF reset */
	if (msgbuf[0] == IXGBE_VF_RESET) {
		int ret = ixgbe_vf_reset(dev, vf, msgbuf);

		vfinfo[vf].clear_to_send = true;

		/* notify application about VF reset */
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
					     &ret_param);
		return ret;
	}

	/**
	 * ask user application if we are allowed to perform those functions:
	 * if ret_param.retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED,
	 * then business as usual;
	 * if RTE_PMD_IXGBE_MB_EVENT_NOOP_ACK, do nothing and send ACK to VF;
	 * if RTE_PMD_IXGBE_MB_EVENT_NOOP_NACK, do nothing and send NACK to VF
	 */
	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &ret_param);

	retval = ret_param.retval;

	/* check & process VF to PF mailbox message */
	switch ((msgbuf[0] & 0xFFFF)) {
	case IXGBE_VF_SET_MAC_ADDR:
		if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
			retval = ixgbe_vf_set_mac_addr(dev, vf, msgbuf);
		break;
	case IXGBE_VF_SET_MULTICAST:
		if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
			retval = ixgbe_vf_set_multicast(dev, vf, msgbuf);
		break;
	case IXGBE_VF_SET_LPE:
		if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
			retval = ixgbe_set_vf_lpe(dev, vf, msgbuf);
		break;
	case IXGBE_VF_SET_VLAN:
		if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
			retval = ixgbe_vf_set_vlan(dev, vf, msgbuf);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		retval = ixgbe_negotiate_vf_api(dev, vf, msgbuf);
		break;
	case IXGBE_VF_GET_QUEUES:
		retval = ixgbe_get_vf_queues(dev, vf, msgbuf);
		msg_size = IXGBE_VF_GET_QUEUE_MSG_SIZE;
		break;
	case IXGBE_VF_UPDATE_XCAST_MODE:
		if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
			retval = ixgbe_set_vf_mc_promisc(dev, vf, msgbuf);
		break;
	case IXGBE_VF_SET_MACVLAN:
		if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
			retval = ixgbe_set_vf_macvlan_msg(dev, vf, msgbuf);
		break;
	default:
		PMD_DRV_LOG(DEBUG, "Unhandled Msg %8.8x", (unsigned int)msgbuf[0]);
		retval = IXGBE_ERR_MBX;
		break;
	}

	/* respond to the VF according to the message processing result */
	if (retval)
		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;

	msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;

	ixgbe_write_mbx(hw, msgbuf, msg_size, vf);

	return retval;
}

static inline void
ixgbe_rcv_ack_from_vf(struct rte_eth_dev *dev, uint16_t vf)
{
	uint32_t msg = IXGBE_VT_MSGTYPE_NACK;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);

	if (!vfinfo[vf].clear_to_send)
		ixgbe_write_mbx(hw, &msg, 1, vf);
}
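
/*
 * Entry point from the PF mailbox interrupt handler: poll each VF for a
 * pending function-level reset, mailbox message, or ack, and service
 * whichever is outstanding.
 */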
void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev)
{
	uint16_t vf;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	for (vf = 0; vf < dev_num_vf(eth_dev); vf++) {
		/* check & process vf function level reset */
		if (!ixgbe_check_for_rst(hw, vf))
			ixgbe_vf_reset_event(eth_dev, vf);

		/* check & process vf mailbox messages */
		if (!ixgbe_check_for_msg(hw, vf))
			ixgbe_rcv_msg_from_vf(eth_dev, vf);

		/* check & process acks from vf */
		if (!ixgbe_check_for_ack(hw, vf))
			ixgbe_rcv_ack_from_vf(eth_dev, vf);
	}
}