/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <rte_ethdev_driver.h>

#include "base/ixgbe_api.h"
#include "base/ixgbe_x550.h"
#include "ixgbe_ethdev.h"
#include "rte_pmd_ixgbe.h"
int
rte_pmd_ixgbe_set_vf_mac_addr(uint16_t port, uint16_t vf,
			      struct ether_addr *mac_addr)
{
	struct ixgbe_hw *hw;
	struct ixgbe_vf_info *vfinfo;
	int rar_entry;
	uint8_t *new_mac = (uint8_t *)(mac_addr);
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	rar_entry = hw->mac.num_rar_entries - (vf + 1);

	if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
		rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
			   ETHER_ADDR_LEN);
		return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
					   IXGBE_RAH_AV);
	}
	return -EINVAL;
}
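/*
 * Illustrative usage from an application (hypothetical values, error
 * handling omitted): assign a locally administered MAC to VF 0 of port 0
 * and then ping the VF over the PF/VF mailbox.
 *
 *	struct ether_addr addr = { .addr_bytes = { 0x02, 0, 0, 0, 0, 1 } };
 *
 *	rte_pmd_ixgbe_set_vf_mac_addr(0, 0, &addr);
 *	rte_pmd_ixgbe_ping_vf(0, 0);
 */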
int
rte_pmd_ixgbe_ping_vf(uint16_t port, uint16_t vf)
{
	struct ixgbe_hw *hw;
	struct ixgbe_vf_info *vfinfo;
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;
	uint32_t ctrl;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));

	ctrl = IXGBE_PF_CONTROL_MSG;
	if (vfinfo[vf].clear_to_send)
		ctrl |= IXGBE_VT_MSGTYPE_CTS;

	ixgbe_write_mbx(hw, &ctrl, 1, vf);

	return 0;
}
int
rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
{
	struct ixgbe_hw *hw;
	struct ixgbe_mac_info *mac;
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	if (on > 1)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	mac = &hw->mac;

	mac->ops.set_vlan_anti_spoofing(hw, on, vf);

	return 0;
}
int
rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
{
	struct ixgbe_hw *hw;
	struct ixgbe_mac_info *mac;
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	if (on > 1)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	mac = &hw->mac;

	mac->ops.set_mac_anti_spoofing(hw, on, vf);

	return 0;
}
int
rte_pmd_ixgbe_set_vf_vlan_insert(uint16_t port, uint16_t vf, uint16_t vlan_id)
{
	struct ixgbe_hw *hw;
	uint32_t ctrl;
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	if (vlan_id > ETHER_MAX_VLAN_ID)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	ctrl = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
	if (vlan_id) {
		ctrl = vlan_id;
		ctrl |= IXGBE_VMVIR_VLANA_DEFAULT;
	} else {
		ctrl = 0;
	}

	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), ctrl);

	return 0;
}
int
rte_pmd_ixgbe_set_tx_loopback(uint16_t port, uint8_t on)
{
	struct ixgbe_hw *hw;
	uint32_t ctrl;
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (on > 1)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	ctrl = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
	/* enable or disable VMDQ loopback */
	if (on)
		ctrl |= IXGBE_PFDTXGSWC_VT_LBEN;
	else
		ctrl &= ~IXGBE_PFDTXGSWC_VT_LBEN;

	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, ctrl);

	return 0;
}
int
rte_pmd_ixgbe_set_all_queues_drop_en(uint16_t port, uint8_t on)
{
	struct ixgbe_hw *hw;
	uint32_t reg_value;
	int i;
	int num_queues = (int)(IXGBE_QDE_IDX_MASK >> IXGBE_QDE_IDX_SHIFT);
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (on > 1)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	for (i = 0; i <= num_queues; i++) {
		reg_value = IXGBE_QDE_WRITE |
				(i << IXGBE_QDE_IDX_SHIFT) |
				(on & IXGBE_QDE_ENABLE);
		IXGBE_WRITE_REG(hw, IXGBE_QDE, reg_value);
	}

	return 0;
}
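/*
 * Note: IXGBE_QDE_IDX_MASK >> IXGBE_QDE_IDX_SHIFT is the highest queue
 * index the QDE register can address, so the loop above writes the
 * drop-enable bit for every RX queue of the device in one pass.
 */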
int
rte_pmd_ixgbe_set_vf_split_drop_en(uint16_t port, uint16_t vf, uint8_t on)
{
	struct ixgbe_hw *hw;
	uint32_t reg_value;
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	/* only support VFs 0 to 63 */
	if ((vf >= pci_dev->max_vfs) || (vf > 63))
		return -EINVAL;

	if (on > 1)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	reg_value = IXGBE_READ_REG(hw, IXGBE_SRRCTL(vf));
	if (on)
		reg_value |= IXGBE_SRRCTL_DROP_EN;
	else
		reg_value &= ~IXGBE_SRRCTL_DROP_EN;

	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(vf), reg_value);

	return 0;
}
int
rte_pmd_ixgbe_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
{
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;
	struct ixgbe_hw *hw;
	uint16_t queues_per_pool;
	uint32_t q;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	if (on > 1)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);

	/* The PF has 128 queue pairs and in SRIOV configuration
	 * those queues will be assigned to VFs, so RXDCTL
	 * registers will be dealing with queues which will be
	 * assigned to VFs.
	 * Let's say we have SRIOV configured with 31 VFs: then the
	 * first 124 queues 0-123 will be allocated to the VFs and only
	 * the last 4 queues 124-127 will be assigned to the PF.
	 */
	if (hw->mac.type == ixgbe_mac_82598EB)
		queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
				  ETH_16_POOLS;
	else
		queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
				  ETH_64_POOLS;

	for (q = 0; q < queues_per_pool; q++)
		(*dev->dev_ops->vlan_strip_queue_set)(dev,
				q + vf * queues_per_pool, on);

	return 0;
}
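/*
 * Worked example for the pool arithmetic above: a device with 128 RX
 * queues split across 64 pools gives queues_per_pool = 2, so the loop
 * toggles VLAN stripping on queues 2 * vf and 2 * vf + 1 for the given VF.
 */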
int
rte_pmd_ixgbe_set_vf_rxmode(uint16_t port, uint16_t vf,
			    uint16_t rx_mask, uint8_t on)
{
	int val = 0;
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;
	struct ixgbe_hw *hw;
	uint32_t vmolr;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	if (on > 1)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	if (hw->mac.type == ixgbe_mac_82598EB) {
		PMD_INIT_LOG(ERR, "setting VF receive mode is only supported"
			     " on 82599 hardware and newer");
		return -ENOTSUP;
	}
	if (ixgbe_vt_check(hw) < 0)
		return -ENOTSUP;

	val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);

	if (on)
		vmolr |= val;
	else
		vmolr &= ~val;

	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	return 0;
}
int
rte_pmd_ixgbe_set_vf_rx(uint16_t port, uint16_t vf, uint8_t on)
{
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;
	uint32_t reg, addr;
	uint32_t val;
	const uint8_t bit1 = 0x1;
	struct ixgbe_hw *hw;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	if (on > 1)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (ixgbe_vt_check(hw) < 0)
		return -ENOTSUP;

	/* for vf >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */
	if (vf >= 32) {
		addr = IXGBE_VFRE(1);
		val = bit1 << (vf - 32);
	} else {
		addr = IXGBE_VFRE(0);
		val = bit1 << vf;
	}

	reg = IXGBE_READ_REG(hw, addr);
	if (on)
		reg |= val;
	else
		reg &= ~val;

	IXGBE_WRITE_REG(hw, addr, reg);

	return 0;
}
int
rte_pmd_ixgbe_set_vf_tx(uint16_t port, uint16_t vf, uint8_t on)
{
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;
	uint32_t reg, addr;
	uint32_t val;
	const uint8_t bit1 = 0x1;
	struct ixgbe_hw *hw;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	if (on > 1)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (ixgbe_vt_check(hw) < 0)
		return -ENOTSUP;

	/* for vf >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */
	if (vf >= 32) {
		addr = IXGBE_VFTE(1);
		val = bit1 << (vf - 32);
	} else {
		addr = IXGBE_VFTE(0);
		val = bit1 << vf;
	}

	reg = IXGBE_READ_REG(hw, addr);
	if (on)
		reg |= val;
	else
		reg &= ~val;

	IXGBE_WRITE_REG(hw, addr, reg);

	return 0;
}
int
rte_pmd_ixgbe_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
				 uint64_t vf_mask, uint8_t vlan_on)
{
	struct rte_eth_dev *dev;
	int ret = 0;
	uint16_t vf_idx;
	struct ixgbe_hw *hw;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if ((vlan > ETHER_MAX_VLAN_ID) || (vf_mask == 0))
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (ixgbe_vt_check(hw) < 0)
		return -ENOTSUP;

	for (vf_idx = 0; vf_idx < 64; vf_idx++) {
		if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
			ret = hw->mac.ops.set_vfta(hw, vlan, vf_idx,
						   vlan_on, false);
			if (ret < 0)
				return ret;
		}
	}

	return ret;
}
int
rte_pmd_ixgbe_set_vf_rate_limit(uint16_t port, uint16_t vf,
				uint16_t tx_rate, uint64_t q_msk)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	return ixgbe_set_vf_rate_limit(dev, vf, tx_rate, q_msk);
}
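/*
 * MACsec (LinkSec) offload control. The helpers below follow a common
 * pattern: quiesce the SECTX/SECRX data paths, reprogram the crypto
 * engines and the secure-channel/secure-association registers, then
 * restart the data paths.
 */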
int
rte_pmd_ixgbe_macsec_enable(uint16_t port, uint8_t en, uint8_t rp)
{
	struct ixgbe_hw *hw;
	struct rte_eth_dev *dev;
	uint32_t ctrl;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Stop the data paths */
	if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
		return -ENOTSUP;
	/* No ixgbe_disable_sec_rx_path equivalent is implemented for Tx
	 * in the base code, and the base code must not be modified in
	 * DPDK, so call the hand-written helper directly for now.
	 * The hardware support has already been checked by
	 * ixgbe_disable_sec_rx_path().
	 */
	ixgbe_disable_sec_tx_path_generic(hw);

	/* Enable Ethernet CRC (required by MACsec offload) */
	ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl);

	/* Enable the TX and RX crypto engines */
	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);

	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);

	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK;
	ctrl |= 0x3;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl);

	/* Enable SA lookup */
	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
	ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT :
		     IXGBE_LSECTXCTRL_AUTH;
	ctrl |= IXGBE_LSECTXCTRL_AISCI;
	ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK;
	ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK;
	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);

	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
	ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT;
	ctrl &= ~IXGBE_LSECRXCTRL_PLSH;
	if (rp)
		ctrl |= IXGBE_LSECRXCTRL_RP;
	else
		ctrl &= ~IXGBE_LSECRXCTRL_RP;
	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);

	/* Start the data paths */
	ixgbe_enable_sec_rx_path(hw);
	/* No ixgbe_enable_sec_rx_path equivalent is implemented for Tx
	 * in the base code, and the base code must not be modified in
	 * DPDK, so call the hand-written helper directly for now.
	 */
	ixgbe_enable_sec_tx_path_generic(hw);

	return 0;
}
int
rte_pmd_ixgbe_macsec_disable(uint16_t port)
{
	struct ixgbe_hw *hw;
	struct rte_eth_dev *dev;
	uint32_t ctrl;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Stop the data paths */
	if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
		return -ENOTSUP;
	/* No ixgbe_disable_sec_rx_path equivalent is implemented for Tx
	 * in the base code, and the base code must not be modified in
	 * DPDK, so call the hand-written helper directly for now.
	 * The hardware support has already been checked by
	 * ixgbe_disable_sec_rx_path().
	 */
	ixgbe_disable_sec_tx_path_generic(hw);

	/* Disable the TX and RX crypto engines */
	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);

	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);

	/* Disable SA lookup */
	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
	ctrl |= IXGBE_LSECTXCTRL_DISABLE;
	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);

	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
	ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);

	/* Start the data paths */
	ixgbe_enable_sec_rx_path(hw);
	/* No ixgbe_enable_sec_rx_path equivalent is implemented for Tx
	 * in the base code, and the base code must not be modified in
	 * DPDK, so call the hand-written helper directly for now.
	 */
	ixgbe_enable_sec_tx_path_generic(hw);

	return 0;
}
int
rte_pmd_ixgbe_macsec_config_txsc(uint16_t port, uint8_t *mac)
{
	struct ixgbe_hw *hw;
	struct rte_eth_dev *dev;
	uint32_t ctrl;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCL, ctrl);

	ctrl = mac[4] | (mac[5] << 8);
	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCH, ctrl);

	return 0;
}
int
rte_pmd_ixgbe_macsec_config_rxsc(uint16_t port, uint8_t *mac, uint16_t pi)
{
	struct ixgbe_hw *hw;
	struct rte_eth_dev *dev;
	uint32_t ctrl;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCL, ctrl);

	pi = rte_cpu_to_be_16(pi);
	ctrl = mac[4] | (mac[5] << 8) | (pi << 16);
	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCH, ctrl);

	return 0;
}
int
rte_pmd_ixgbe_macsec_select_txsa(uint16_t port, uint8_t idx, uint8_t an,
				 uint32_t pn, uint8_t *key)
{
	struct ixgbe_hw *hw;
	struct rte_eth_dev *dev;
	uint32_t ctrl, i;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (idx != 0 && idx != 1)
		return -EINVAL;

	if (an >= 4)
		return -EINVAL;

	/* Set the PN and key */
	pn = rte_cpu_to_be_32(pn);
	if (idx == 0) {
		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN0, pn);

		for (i = 0; i < 4; i++) {
			ctrl = (key[i * 4 + 0] << 0) |
			       (key[i * 4 + 1] << 8) |
			       (key[i * 4 + 2] << 16) |
			       (key[i * 4 + 3] << 24);
			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY0(i), ctrl);
		}
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN1, pn);

		for (i = 0; i < 4; i++) {
			ctrl = (key[i * 4 + 0] << 0) |
			       (key[i * 4 + 1] << 8) |
			       (key[i * 4 + 2] << 16) |
			       (key[i * 4 + 3] << 24);
			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY1(i), ctrl);
		}
	}

	/* Set AN and select the SA */
	ctrl = (an << idx * 2) | (idx << 4);
	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSA, ctrl);

	return 0;
}
int
rte_pmd_ixgbe_macsec_select_rxsa(uint16_t port, uint8_t idx, uint8_t an,
				 uint32_t pn, uint8_t *key)
{
	struct ixgbe_hw *hw;
	struct rte_eth_dev *dev;
	uint32_t ctrl, i;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (idx != 0 && idx != 1)
		return -EINVAL;

	if (an >= 4)
		return -EINVAL;

	/* Set the PN */
	pn = rte_cpu_to_be_32(pn);
	IXGBE_WRITE_REG(hw, IXGBE_LSECRXPN(idx), pn);

	/* Set the key */
	for (i = 0; i < 4; i++) {
		ctrl = (key[i * 4 + 0] << 0) |
		       (key[i * 4 + 1] << 8) |
		       (key[i * 4 + 2] << 16) |
		       (key[i * 4 + 3] << 24);
		IXGBE_WRITE_REG(hw, IXGBE_LSECRXKEY(idx, i), ctrl);
	}

	/* Set the AN and validate the SA */
	ctrl = an | (1 << 2);
	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSA(idx), ctrl);

	return 0;
}
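/*
 * A plausible call sequence for enabling MACsec offload with the helpers
 * above (illustrative only; addresses, keys and identifiers are
 * placeholders, and error checking is omitted):
 *
 *	rte_pmd_ixgbe_macsec_enable(port, 1, 1);
 *	rte_pmd_ixgbe_macsec_config_txsc(port, local_mac);
 *	rte_pmd_ixgbe_macsec_config_rxsc(port, peer_mac, peer_pi);
 *	rte_pmd_ixgbe_macsec_select_txsa(port, 0, 0, tx_pn, tx_key);
 *	rte_pmd_ixgbe_macsec_select_rxsa(port, 0, 0, rx_pn, rx_key);
 */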
int
rte_pmd_ixgbe_set_tc_bw_alloc(uint16_t port,
			      uint8_t tc_num,
			      uint8_t *bw_weight)
{
	struct rte_eth_dev *dev;
	struct ixgbe_dcb_config *dcb_config;
	struct ixgbe_dcb_tc_config *tc;
	struct rte_eth_conf *eth_conf;
	struct ixgbe_bw_conf *bw_conf;
	uint8_t i;
	uint8_t nb_tcs;
	uint16_t sum;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (tc_num > IXGBE_DCB_MAX_TRAFFIC_CLASS) {
		PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
			    IXGBE_DCB_MAX_TRAFFIC_CLASS);
		return -EINVAL;
	}

	dcb_config = IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
	bw_conf = IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
	eth_conf = &dev->data->dev_conf;

	if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
	} else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
		    ETH_32_POOLS)
			nb_tcs = ETH_4_TCS;
		else
			nb_tcs = ETH_8_TCS;
	} else {
		nb_tcs = 1;
	}

	if (nb_tcs != tc_num) {
		PMD_DRV_LOG(ERR,
			    "Weight should be set for all %d enabled TCs.",
			    nb_tcs);
		return -EINVAL;
	}

	sum = 0;
	for (i = 0; i < nb_tcs; i++)
		sum += bw_weight[i];
	if (sum != 100) {
		PMD_DRV_LOG(ERR,
			    "The sum of all TC weights must be 100.");
		return -EINVAL;
	}

	for (i = 0; i < nb_tcs; i++) {
		tc = &dcb_config->tc_config[i];
		tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = bw_weight[i];
	}
	for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		tc = &dcb_config->tc_config[i];
		tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
	}

	bw_conf->tc_num = nb_tcs;

	return 0;
}
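/*
 * Example of a weight vector that passes the checks above (hypothetical):
 * with 4 TCs enabled in DCB mode, bw_weight = {40, 30, 20, 10} sums to 100
 * and assigns each traffic class its share of transmit bandwidth.
 */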
int __rte_experimental
rte_pmd_ixgbe_upd_fctrl_sbp(uint16_t port, int enable)
{
	struct ixgbe_hw *hw;
	struct rte_eth_dev *dev;
	uint32_t fctrl;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
	dev = &rte_eth_devices[port];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (!hw)
		return -ENOTSUP;

	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);

	/* If 'enable' set the SBP bit else clear it */
	if (enable)
		fctrl |= IXGBE_FCTRL_SBP;
	else
		fctrl &= ~(IXGBE_FCTRL_SBP);

	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
	return 0;
}
#ifdef RTE_LIBRTE_IXGBE_BYPASS
int
rte_pmd_ixgbe_bypass_init(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	ixgbe_bypass_init(dev);
	return 0;
}
int
rte_pmd_ixgbe_bypass_state_show(uint16_t port_id, uint32_t *state)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	return ixgbe_bypass_state_show(dev, state);
}
int
rte_pmd_ixgbe_bypass_state_set(uint16_t port_id, uint32_t *new_state)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	return ixgbe_bypass_state_store(dev, new_state);
}
int
rte_pmd_ixgbe_bypass_event_show(uint16_t port_id,
				uint32_t event,
				uint32_t *state)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	return ixgbe_bypass_event_show(dev, event, state);
}
int
rte_pmd_ixgbe_bypass_event_store(uint16_t port_id,
				 uint32_t event,
				 uint32_t state)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	return ixgbe_bypass_event_store(dev, event, state);
}
int
rte_pmd_ixgbe_bypass_wd_timeout_store(uint16_t port_id, uint32_t timeout)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	return ixgbe_bypass_wd_timeout_store(dev, timeout);
}
int
rte_pmd_ixgbe_bypass_ver_show(uint16_t port_id, uint32_t *ver)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	return ixgbe_bypass_ver_show(dev, ver);
}
int
rte_pmd_ixgbe_bypass_wd_timeout_show(uint16_t port_id, uint32_t *wd_timeout)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	return ixgbe_bypass_wd_timeout_show(dev, wd_timeout);
}
int
rte_pmd_ixgbe_bypass_wd_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	return ixgbe_bypass_wd_reset(dev);
}
#endif /* RTE_LIBRTE_IXGBE_BYPASS */
/**
 * rte_pmd_ixgbe_acquire_swfw - Acquire SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to acquire
 *
 * Acquires the SWFW semaphore and gets the shared PHY token as needed.
 */
STATIC s32 rte_pmd_ixgbe_acquire_swfw(struct ixgbe_hw *hw, u32 mask)
{
	int retries = FW_PHY_TOKEN_RETRIES;
	s32 status = IXGBE_SUCCESS;

	while (--retries) {
		status = ixgbe_acquire_swfw_semaphore(hw, mask);
		if (status) {
			PMD_DRV_LOG(ERR, "Get SWFW sem failed, Status = %d\n",
				    status);
			return status;
		}
		status = ixgbe_get_phy_token(hw);
		if (status == IXGBE_SUCCESS)
			return IXGBE_SUCCESS;

		if (status == IXGBE_ERR_TOKEN_RETRY)
			PMD_DRV_LOG(ERR, "Get PHY token failed, Status = %d\n",
				    status);

		ixgbe_release_swfw_semaphore(hw, mask);
		if (status != IXGBE_ERR_TOKEN_RETRY) {
			PMD_DRV_LOG(ERR,
				    "Retry get PHY token failed, Status=%d\n",
				    status);
			return status;
		}
	}
	PMD_DRV_LOG(ERR, "swfw acquisition retries failed!: PHY ID = 0x%08X\n",
		    hw->phy.id);
	return status;
}
/**
 * rte_pmd_ixgbe_release_swfw - Release SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to release
 *
 * Releases the SWFW semaphore and puts back the shared PHY token as needed.
 */
STATIC void rte_pmd_ixgbe_release_swfw(struct ixgbe_hw *hw, u32 mask)
{
	ixgbe_put_phy_token(hw);
	ixgbe_release_swfw_semaphore(hw, mask);
}
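/*
 * The MDIO helpers below are meant to be used as a pair. A sketch of the
 * expected calling pattern from an application (illustrative only, error
 * handling omitted):
 *
 *	uint16_t val;
 *
 *	if (rte_pmd_ixgbe_mdio_lock(port) == IXGBE_SUCCESS) {
 *		rte_pmd_ixgbe_mdio_unlocked_read(port, reg, dev_type, &val);
 *		rte_pmd_ixgbe_mdio_unlock(port);
 *	}
 */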
int __rte_experimental
rte_pmd_ixgbe_mdio_lock(uint16_t port)
{
	struct ixgbe_hw *hw;
	struct rte_eth_dev *dev;
	u32 swfw_mask;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
	dev = &rte_eth_devices[port];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (!hw)
		return -ENOTSUP;

	if (hw->bus.lan_id)
		swfw_mask = IXGBE_GSSR_PHY1_SM;
	else
		swfw_mask = IXGBE_GSSR_PHY0_SM;

	if (rte_pmd_ixgbe_acquire_swfw(hw, swfw_mask))
		return IXGBE_ERR_SWFW_SYNC;

	return IXGBE_SUCCESS;
}
int __rte_experimental
rte_pmd_ixgbe_mdio_unlock(uint16_t port)
{
	struct rte_eth_dev *dev;
	struct ixgbe_hw *hw;
	u32 swfw_mask;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (!hw)
		return -ENOTSUP;

	if (hw->bus.lan_id)
		swfw_mask = IXGBE_GSSR_PHY1_SM;
	else
		swfw_mask = IXGBE_GSSR_PHY0_SM;

	rte_pmd_ixgbe_release_swfw(hw, swfw_mask);

	return IXGBE_SUCCESS;
}
int __rte_experimental
rte_pmd_ixgbe_mdio_unlocked_read(uint16_t port, uint32_t reg_addr,
				 uint32_t dev_type, uint16_t *phy_data)
{
	struct ixgbe_hw *hw;
	struct rte_eth_dev *dev;
	u32 i, data, command;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
	dev = &rte_eth_devices[port];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (!hw)
		return -ENOTSUP;

	/* Setup and write the read command */
	command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
		  (dev_type << IXGBE_MSCA_PHY_ADDR_SHIFT) |
		  IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC |
		  IXGBE_MSCA_MDI_COMMAND;

	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

	/* Check every 10 usec to see if the access completed.
	 * The MDI Command bit will clear when the operation is
	 * complete.
	 */
	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
		usec_delay(10);

		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
		if (!(command & IXGBE_MSCA_MDI_COMMAND))
			break;
	}
	if (command & IXGBE_MSCA_MDI_COMMAND)
		return IXGBE_ERR_PHY;

	/* Read operation is complete. Get the data from MSRWD */
	data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
	data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
	*phy_data = (u16)data;

	return 0;
}
int __rte_experimental
rte_pmd_ixgbe_mdio_unlocked_write(uint16_t port, uint32_t reg_addr,
				  uint32_t dev_type, uint16_t phy_data)
{
	struct ixgbe_hw *hw;
	u32 i, command;
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
	dev = &rte_eth_devices[port];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (!hw)
		return -ENOTSUP;

	/* Put the data in the MDI single read and write data register */
	IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);

	/* Setup and write the write command */
	command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
		  (dev_type << IXGBE_MSCA_PHY_ADDR_SHIFT) |
		  IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
		  IXGBE_MSCA_MDI_COMMAND;

	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

	/* Check every 10 usec to see if the access completed.
	 * The MDI Command bit will clear when the operation is
	 * complete.
	 */
	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
		usec_delay(10);

		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
		if (!(command & IXGBE_MSCA_MDI_COMMAND))
			break;
	}
	if (command & IXGBE_MSCA_MDI_COMMAND) {
		ERROR_REPORT1(IXGBE_ERROR_POLLING,
			      "PHY write cmd didn't complete\n");
		return IXGBE_ERR_PHY;