/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <rte_ethdev_driver.h>

#include "base/ixgbe_api.h"
#include "base/ixgbe_x550.h"
#include "ixgbe_ethdev.h"
#include "rte_pmd_ixgbe.h"

int
rte_pmd_ixgbe_set_vf_mac_addr(uint16_t port, uint16_t vf,
			      struct rte_ether_addr *mac_addr)
{
	struct ixgbe_hw *hw;
	struct ixgbe_vf_info *vfinfo;
	int rar_entry;
	uint8_t *new_mac = (uint8_t *)(mac_addr);
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	/* each VF owns one RAR entry, counted back from the end of the table */
	rar_entry = hw->mac.num_rar_entries - (vf + 1);

	if (rte_is_valid_assigned_ether_addr(
			(struct rte_ether_addr *)new_mac)) {
		rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
			   RTE_ETHER_ADDR_LEN);
		return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
					   IXGBE_RAH_AV);
	}
	return -EINVAL;
}

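/*
 * Editorial example (not part of the driver): the RAR table is shared
 * between the PF and its VFs, with VF entries allocated from the top of
 * the table downwards. Assuming the 128-entry table of an 82599-class
 * device:
 *
 *	vf = 0  ->  rar_entry = 128 - (0 + 1) = 127
 *	vf = 7  ->  rar_entry = 128 - (7 + 1) = 120
 */
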
int
rte_pmd_ixgbe_ping_vf(uint16_t port, uint16_t vf)
{
	struct ixgbe_hw *hw;
	struct ixgbe_vf_info *vfinfo;
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;
	uint32_t ctrl;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));

	ctrl = IXGBE_PF_CONTROL_MSG;
	/* only flag the message as clear-to-send if the VF is ready for it */
	if (vfinfo[vf].clear_to_send)
		ctrl |= IXGBE_VT_MSGTYPE_CTS;

	ixgbe_write_mbx(hw, &ctrl, 1, vf);

	return 0;
}

int
rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
{
	struct ixgbe_hw *hw;
	struct ixgbe_mac_info *mac;
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	if (on > 1)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	mac = &hw->mac;

	mac->ops.set_vlan_anti_spoofing(hw, on, vf);

	return 0;
}

int
rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
{
	struct ixgbe_hw *hw;
	struct ixgbe_mac_info *mac;
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	if (on > 1)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	mac = &hw->mac;

	mac->ops.set_mac_anti_spoofing(hw, on, vf);

	return 0;
}

int
rte_pmd_ixgbe_set_vf_vlan_insert(uint16_t port, uint16_t vf, uint16_t vlan_id)
{
	struct ixgbe_hw *hw;
	uint32_t ctrl;
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	if (vlan_id > RTE_ETHER_MAX_VLAN_ID)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	ctrl = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
	if (vlan_id) {
		ctrl = vlan_id;
		ctrl |= IXGBE_VMVIR_VLANA_DEFAULT;
	} else {
		ctrl = 0;
	}

	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), ctrl);

	return 0;
}

int
rte_pmd_ixgbe_set_tx_loopback(uint16_t port, uint8_t on)
{
	struct ixgbe_hw *hw;
	uint32_t ctrl;
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (on > 1)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	ctrl = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
	/* enable or disable VMDQ loopback */
	if (on)
		ctrl |= IXGBE_PFDTXGSWC_VT_LBEN;
	else
		ctrl &= ~IXGBE_PFDTXGSWC_VT_LBEN;

	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, ctrl);

	return 0;
}

int
rte_pmd_ixgbe_set_all_queues_drop_en(uint16_t port, uint8_t on)
{
	struct ixgbe_hw *hw;
	uint32_t reg_value;
	int i;
	int num_queues = (int)(IXGBE_QDE_IDX_MASK >> IXGBE_QDE_IDX_SHIFT);
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (on > 1)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	for (i = 0; i <= num_queues; i++) {
		reg_value = IXGBE_QDE_WRITE |
				(i << IXGBE_QDE_IDX_SHIFT) |
				(on & IXGBE_QDE_ENABLE);
		IXGBE_WRITE_REG(hw, IXGBE_QDE, reg_value);
	}

	return 0;
}

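/*
 * Editorial note: IXGBE_QDE is a single indexed register, so each write
 * above carries the queue index and the enable bit together. For
 * example, enabling drop on queue 5 writes:
 *
 *	IXGBE_QDE_WRITE | (5 << IXGBE_QDE_IDX_SHIFT) | IXGBE_QDE_ENABLE
 */
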
int
rte_pmd_ixgbe_set_vf_split_drop_en(uint16_t port, uint16_t vf, uint8_t on)
{
	struct ixgbe_hw *hw;
	uint32_t reg_value;
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	/* only VFs 0 to 63 are supported */
	if ((vf >= pci_dev->max_vfs) || (vf > 63))
		return -EINVAL;

	if (on > 1)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	reg_value = IXGBE_READ_REG(hw, IXGBE_SRRCTL(vf));
	if (on)
		reg_value |= IXGBE_SRRCTL_DROP_EN;
	else
		reg_value &= ~IXGBE_SRRCTL_DROP_EN;

	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(vf), reg_value);

	return 0;
}

int
rte_pmd_ixgbe_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
{
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;
	struct ixgbe_hw *hw;
	uint16_t queues_per_pool;
	uint32_t q;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	if (on > 1)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);

	/* The PF has 128 queue pairs and in SRIOV configuration
	 * those queues are assigned to VFs, so the RXDCTL
	 * registers are dealing with queues that belong to VFs.
	 * Say SRIOV is configured with 31 VFs: the first 124
	 * queues, 0-123, are allocated to the VFs and only the
	 * last 4 queues, 124-127, are assigned to the PF.
	 */
	if (hw->mac.type == ixgbe_mac_82598EB)
		queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
				  ETH_16_POOLS;
	else
		queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
				  ETH_64_POOLS;

	for (q = 0; q < queues_per_pool; q++)
		(*dev->dev_ops->vlan_strip_queue_set)(dev,
				q + vf * queues_per_pool, on);
	return 0;
}

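/*
 * Editorial example: on anything newer than the 82598 the code above
 * divides the RX queues across ETH_64_POOLS. With 128 RX queues that
 * gives queues_per_pool = 2, so a call with vf = 3 toggles stripping on
 * queues 6 and 7:
 *
 *	rte_pmd_ixgbe_set_vf_vlan_stripq(port, 3, 1);
 */
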
int
rte_pmd_ixgbe_set_vf_rxmode(uint16_t port, uint16_t vf,
			    uint16_t rx_mask, uint8_t on)
{
	int val = 0;
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;
	struct ixgbe_hw *hw;
	uint32_t vmolr;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	if (on > 1)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	if (hw->mac.type == ixgbe_mac_82598EB) {
		PMD_INIT_LOG(ERR, "setting VF receive mode is only supported"
			     " on 82599 hardware and newer");
		return -ENOTSUP;
	}
	if (ixgbe_vt_check(hw) < 0)
		return -ENOTSUP;

	val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);

	if (on)
		vmolr |= val;
	else
		vmolr &= ~val;

	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	return 0;
}

int
rte_pmd_ixgbe_set_vf_rx(uint16_t port, uint16_t vf, uint8_t on)
{
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;
	uint32_t reg, addr;
	uint32_t val;
	const uint8_t bit1 = 0x1;
	struct ixgbe_hw *hw;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	if (on > 1)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (ixgbe_vt_check(hw) < 0)
		return -ENOTSUP;

	/* for vf >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */
	if (vf >= 32) {
		addr = IXGBE_VFRE(1);
		val = bit1 << (vf - 32);
	} else {
		addr = IXGBE_VFRE(0);
		val = bit1 << vf;
	}

	reg = IXGBE_READ_REG(hw, addr);

	if (on)
		reg |= val;
	else
		reg &= ~val;

	IXGBE_WRITE_REG(hw, addr, reg);

	return 0;
}

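/*
 * Editorial example: PFVFRE is a pair of 32-bit registers, one bit per
 * VF. A call for vf = 35 therefore lands in the second bank:
 *
 *	addr = IXGBE_VFRE(1);
 *	val  = 0x1 << (35 - 32);	// bit 3 of PFVFRE[1]
 */
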
int
rte_pmd_ixgbe_set_vf_tx(uint16_t port, uint16_t vf, uint8_t on)
{
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;
	uint32_t reg, addr;
	uint32_t val;
	const uint8_t bit1 = 0x1;
	struct ixgbe_hw *hw;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	if (on > 1)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (ixgbe_vt_check(hw) < 0)
		return -ENOTSUP;

	/* for vf >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */
	if (vf >= 32) {
		addr = IXGBE_VFTE(1);
		val = bit1 << (vf - 32);
	} else {
		addr = IXGBE_VFTE(0);
		val = bit1 << vf;
	}

	reg = IXGBE_READ_REG(hw, addr);

	if (on)
		reg |= val;
	else
		reg &= ~val;

	IXGBE_WRITE_REG(hw, addr, reg);

	return 0;
}

int
rte_pmd_ixgbe_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
				 uint64_t vf_mask, uint8_t vlan_on)
{
	struct rte_eth_dev *dev;
	int ret = 0;
	uint16_t vf_idx;
	struct ixgbe_hw *hw;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (vlan > RTE_ETHER_MAX_VLAN_ID || vf_mask == 0)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (ixgbe_vt_check(hw) < 0)
		return -ENOTSUP;

	for (vf_idx = 0; vf_idx < 64; vf_idx++) {
		if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
			ret = hw->mac.ops.set_vfta(hw, vlan, vf_idx,
						   vlan_on, false);
			if (ret < 0)
				return ret;
		}
	}

	return ret;
}

int
rte_pmd_ixgbe_set_vf_rate_limit(uint16_t port, uint16_t vf,
				uint16_t tx_rate, uint64_t q_msk)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	return ixgbe_set_vf_rate_limit(dev, vf, tx_rate, q_msk);
}

int
rte_pmd_ixgbe_macsec_enable(uint16_t port, uint8_t en, uint8_t rp)
{
	struct ixgbe_hw *hw;
	struct rte_eth_dev *dev;
	uint32_t ctrl;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Stop the data paths */
	if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
		return -ENOTSUP;
	/**
	 * Workaround:
	 * No TX equivalent of ixgbe_disable_sec_rx_path is implemented
	 * in the base code, and the base code must not be modified in
	 * DPDK, so call the hand-written one directly for now.
	 * The hardware support has already been checked by
	 * ixgbe_disable_sec_rx_path().
	 */
	ixgbe_disable_sec_tx_path_generic(hw);

	/* Enable Ethernet CRC (required by MACsec offload) */
	ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl);

	/* Enable the TX and RX crypto engines */
	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);

	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);

	/* Set a minimum security TX inter-frame gap */
	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK;
	ctrl |= 0x3;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl);

	/* Enable SA lookup */
	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
	ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT :
		     IXGBE_LSECTXCTRL_AUTH;
	ctrl |= IXGBE_LSECTXCTRL_AISCI;
	ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK;
	ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK;
	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);

	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
	ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT;
	ctrl &= ~IXGBE_LSECRXCTRL_PLSH;
	if (rp)
		ctrl |= IXGBE_LSECRXCTRL_RP;
	else
		ctrl &= ~IXGBE_LSECRXCTRL_RP;
	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);

	/* Start the data paths */
	ixgbe_enable_sec_rx_path(hw);
	/**
	 * Workaround:
	 * No TX equivalent of ixgbe_enable_sec_rx_path is implemented
	 * in the base code, and the base code must not be modified in
	 * DPDK, so call the hand-written one directly for now.
	 */
	ixgbe_enable_sec_tx_path_generic(hw);

	return 0;
}

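/*
 * Editorial sketch of a typical MACsec bring-up from an application,
 * using only the APIs defined in this file (the port, MAC addresses,
 * keys and packet numbers are placeholders):
 *
 *	rte_pmd_ixgbe_macsec_enable(port, 1, 1);
 *	rte_pmd_ixgbe_macsec_config_txsc(port, local_mac);
 *	rte_pmd_ixgbe_macsec_config_rxsc(port, peer_mac, pi);
 *	rte_pmd_ixgbe_macsec_select_txsa(port, 0, 0, tx_pn, tx_key);
 *	rte_pmd_ixgbe_macsec_select_rxsa(port, 0, 0, rx_pn, rx_key);
 */
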
int
rte_pmd_ixgbe_macsec_disable(uint16_t port)
{
	struct ixgbe_hw *hw;
	struct rte_eth_dev *dev;
	uint32_t ctrl;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Stop the data paths */
	if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
		return -ENOTSUP;
	/**
	 * Workaround:
	 * No TX equivalent of ixgbe_disable_sec_rx_path is implemented
	 * in the base code, and the base code must not be modified in
	 * DPDK, so call the hand-written one directly for now.
	 * The hardware support has already been checked by
	 * ixgbe_disable_sec_rx_path().
	 */
	ixgbe_disable_sec_tx_path_generic(hw);

	/* Disable the TX and RX crypto engines */
	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);

	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);

	/* Disable SA lookup */
	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
	ctrl |= IXGBE_LSECTXCTRL_DISABLE;
	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);

	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
	ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);

	/* Start the data paths */
	ixgbe_enable_sec_rx_path(hw);
	/**
	 * Workaround:
	 * No TX equivalent of ixgbe_enable_sec_rx_path is implemented
	 * in the base code, and the base code must not be modified in
	 * DPDK, so call the hand-written one directly for now.
	 */
	ixgbe_enable_sec_tx_path_generic(hw);

	return 0;
}

int
rte_pmd_ixgbe_macsec_config_txsc(uint16_t port, uint8_t *mac)
{
	struct ixgbe_hw *hw;
	struct rte_eth_dev *dev;
	uint32_t ctrl;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* the SCI is the MAC address: low 4 bytes, then high 2 bytes */
	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCL, ctrl);

	ctrl = mac[4] | (mac[5] << 8);
	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCH, ctrl);

	return 0;
}

int
rte_pmd_ixgbe_macsec_config_rxsc(uint16_t port, uint8_t *mac, uint16_t pi)
{
	struct ixgbe_hw *hw;
	struct rte_eth_dev *dev;
	uint32_t ctrl;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCL, ctrl);

	/* the port identifier is stored big-endian in the upper half */
	pi = rte_cpu_to_be_16(pi);
	ctrl = mac[4] | (mac[5] << 8) | (pi << 16);
	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCH, ctrl);

	return 0;
}

int
rte_pmd_ixgbe_macsec_select_txsa(uint16_t port, uint8_t idx, uint8_t an,
				 uint32_t pn, uint8_t *key)
{
	struct ixgbe_hw *hw;
	struct rte_eth_dev *dev;
	uint32_t ctrl, i;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (idx != 0 && idx != 1)
		return -EINVAL;

	if (an >= 4)
		return -EINVAL;

	/* Set the PN and key */
	pn = rte_cpu_to_be_32(pn);
	if (idx == 0) {
		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN0, pn);

		for (i = 0; i < 4; i++) {
			ctrl = (key[i * 4 + 0] <<  0) |
			       (key[i * 4 + 1] <<  8) |
			       (key[i * 4 + 2] << 16) |
			       (key[i * 4 + 3] << 24);
			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY0(i), ctrl);
		}
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN1, pn);

		for (i = 0; i < 4; i++) {
			ctrl = (key[i * 4 + 0] <<  0) |
			       (key[i * 4 + 1] <<  8) |
			       (key[i * 4 + 2] << 16) |
			       (key[i * 4 + 3] << 24);
			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY1(i), ctrl);
		}
	}

	/* Set AN and select the SA */
	ctrl = (an << idx * 2) | (idx << 4);
	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSA, ctrl);

	return 0;
}

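/*
 * Editorial example: LSECTXSA packs the association number of each SA
 * into two-bit fields and selects the active SA with bit 4. For
 * idx = 1, an = 2 the write above is:
 *
 *	(2 << 1 * 2) | (1 << 4)		// 0x08 | 0x10 = 0x18
 */
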
int
rte_pmd_ixgbe_macsec_select_rxsa(uint16_t port, uint8_t idx, uint8_t an,
				 uint32_t pn, uint8_t *key)
{
	struct ixgbe_hw *hw;
	struct rte_eth_dev *dev;
	uint32_t ctrl, i;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (idx != 0 && idx != 1)
		return -EINVAL;

	if (an >= 4)
		return -EINVAL;

	/* Set the PN */
	pn = rte_cpu_to_be_32(pn);
	IXGBE_WRITE_REG(hw, IXGBE_LSECRXPN(idx), pn);

	/* Set the key */
	for (i = 0; i < 4; i++) {
		ctrl = (key[i * 4 + 0] <<  0) |
		       (key[i * 4 + 1] <<  8) |
		       (key[i * 4 + 2] << 16) |
		       (key[i * 4 + 3] << 24);
		IXGBE_WRITE_REG(hw, IXGBE_LSECRXKEY(idx, i), ctrl);
	}

	/* Set the AN and validate the SA */
	ctrl = an | (1 << 2);
	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSA(idx), ctrl);

	return 0;
}

int
rte_pmd_ixgbe_set_tc_bw_alloc(uint16_t port,
			      uint8_t tc_num,
			      uint8_t *bw_weight)
{
	struct rte_eth_dev *dev;
	struct ixgbe_dcb_config *dcb_config;
	struct ixgbe_dcb_tc_config *tc;
	struct rte_eth_conf *eth_conf;
	struct ixgbe_bw_conf *bw_conf;
	uint8_t i;
	uint8_t nb_tcs;
	uint16_t sum;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (tc_num > IXGBE_DCB_MAX_TRAFFIC_CLASS) {
		PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
			    IXGBE_DCB_MAX_TRAFFIC_CLASS);
		return -EINVAL;
	}

	dcb_config = IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
	bw_conf = IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
	eth_conf = &dev->data->dev_conf;

	if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
	} else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
		    ETH_32_POOLS)
			nb_tcs = ETH_4_TCS;
		else
			nb_tcs = ETH_8_TCS;
	} else {
		nb_tcs = 1;
	}

	if (nb_tcs != tc_num) {
		PMD_DRV_LOG(ERR,
			    "Weight should be set for all %d enabled TCs.",
			    nb_tcs);
		return -EINVAL;
	}

	sum = 0;
	for (i = 0; i < nb_tcs; i++)
		sum += bw_weight[i];
	if (sum != 100) {
		PMD_DRV_LOG(ERR,
			    "The sum of the TC weights should be 100.");
		return -EINVAL;
	}

	for (i = 0; i < nb_tcs; i++) {
		tc = &dcb_config->tc_config[i];
		tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = bw_weight[i];
	}
	for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		tc = &dcb_config->tc_config[i];
		tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
	}

	bw_conf->tc_num = nb_tcs;

	return 0;
}

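/*
 * Editorial example: with 4 TCs enabled in DCB TX mode, a weight must
 * be supplied for every enabled TC and the weights must sum to exactly
 * 100:
 *
 *	uint8_t bw_weight[4] = {40, 30, 20, 10};
 *
 *	rte_pmd_ixgbe_set_tc_bw_alloc(port, 4, bw_weight);
 */
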
int
rte_pmd_ixgbe_upd_fctrl_sbp(uint16_t port, int enable)
{
	struct ixgbe_hw *hw;
	struct rte_eth_dev *dev;
	uint32_t fctrl;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
	dev = &rte_eth_devices[port];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (!hw)
		return -ENOTSUP;

	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);

	/* If 'enable' is set, set the SBP bit, else clear it */
	if (enable)
		fctrl |= IXGBE_FCTRL_SBP;
	else
		fctrl &= ~(IXGBE_FCTRL_SBP);

	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
	return 0;
}

#ifdef RTE_LIBRTE_IXGBE_BYPASS
int
rte_pmd_ixgbe_bypass_init(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	ixgbe_bypass_init(dev);
	return 0;
}

int
rte_pmd_ixgbe_bypass_state_show(uint16_t port_id, uint32_t *state)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	return ixgbe_bypass_state_show(dev, state);
}

int
rte_pmd_ixgbe_bypass_state_set(uint16_t port_id, uint32_t *new_state)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	return ixgbe_bypass_state_store(dev, new_state);
}

int
rte_pmd_ixgbe_bypass_event_show(uint16_t port_id,
				uint32_t event,
				uint32_t *state)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	return ixgbe_bypass_event_show(dev, event, state);
}

int
rte_pmd_ixgbe_bypass_event_store(uint16_t port_id,
				 uint32_t event,
				 uint32_t state)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	return ixgbe_bypass_event_store(dev, event, state);
}

int
rte_pmd_ixgbe_bypass_wd_timeout_store(uint16_t port_id, uint32_t timeout)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	return ixgbe_bypass_wd_timeout_store(dev, timeout);
}

int
rte_pmd_ixgbe_bypass_ver_show(uint16_t port_id, uint32_t *ver)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	return ixgbe_bypass_ver_show(dev, ver);
}

int
rte_pmd_ixgbe_bypass_wd_timeout_show(uint16_t port_id, uint32_t *wd_timeout)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	return ixgbe_bypass_wd_timeout_show(dev, wd_timeout);
}

int
rte_pmd_ixgbe_bypass_wd_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	return ixgbe_bypass_wd_reset(dev);
}
#endif

/**
 * rte_pmd_ixgbe_acquire_swfw - Acquire SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to acquire
 *
 * Acquires the SWFW semaphore and gets the shared PHY token as needed.
 */
STATIC s32 rte_pmd_ixgbe_acquire_swfw(struct ixgbe_hw *hw, u32 mask)
{
	int retries = FW_PHY_TOKEN_RETRIES;
	s32 status = IXGBE_SUCCESS;

	while (--retries) {
		status = ixgbe_acquire_swfw_semaphore(hw, mask);
		if (status) {
			PMD_DRV_LOG(ERR, "Get SWFW sem failed, Status = %d\n",
				    status);
			return status;
		}
		status = ixgbe_get_phy_token(hw);
		if (status == IXGBE_SUCCESS)
			return IXGBE_SUCCESS;

		if (status == IXGBE_ERR_TOKEN_RETRY)
			PMD_DRV_LOG(ERR, "Get PHY token failed, Status = %d\n",
				    status);

		ixgbe_release_swfw_semaphore(hw, mask);
		if (status != IXGBE_ERR_TOKEN_RETRY) {
			PMD_DRV_LOG(ERR,
				    "Retry get PHY token failed, Status=%d\n",
				    status);
			return status;
		}
	}
	PMD_DRV_LOG(ERR, "swfw acquisition retries failed!: PHY ID = 0x%08X\n",
		    hw->phy.id);
	return status;
}

/**
 * rte_pmd_ixgbe_release_swfw - Release SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to release
 *
 * Releases the SWFW semaphore and puts back the shared PHY token as needed.
 */
STATIC void rte_pmd_ixgbe_release_swfw(struct ixgbe_hw *hw, u32 mask)
{
	ixgbe_put_phy_token(hw);
	ixgbe_release_swfw_semaphore(hw, mask);
}

int
rte_pmd_ixgbe_mdio_lock(uint16_t port)
{
	struct ixgbe_hw *hw;
	struct rte_eth_dev *dev;
	u32 swfw_mask;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
	dev = &rte_eth_devices[port];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (!hw)
		return -ENOTSUP;

	if (hw->bus.lan_id)
		swfw_mask = IXGBE_GSSR_PHY1_SM;
	else
		swfw_mask = IXGBE_GSSR_PHY0_SM;

	if (rte_pmd_ixgbe_acquire_swfw(hw, swfw_mask))
		return IXGBE_ERR_SWFW_SYNC;

	return IXGBE_SUCCESS;
}

int
rte_pmd_ixgbe_mdio_unlock(uint16_t port)
{
	struct rte_eth_dev *dev;
	struct ixgbe_hw *hw;
	u32 swfw_mask;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (!hw)
		return -ENOTSUP;

	if (hw->bus.lan_id)
		swfw_mask = IXGBE_GSSR_PHY1_SM;
	else
		swfw_mask = IXGBE_GSSR_PHY0_SM;

	rte_pmd_ixgbe_release_swfw(hw, swfw_mask);

	return IXGBE_SUCCESS;
}

int
rte_pmd_ixgbe_mdio_unlocked_read(uint16_t port, uint32_t reg_addr,
				 uint32_t dev_type, uint16_t *phy_data)
{
	struct ixgbe_hw *hw;
	struct rte_eth_dev *dev;
	u32 i, data, command;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
	dev = &rte_eth_devices[port];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (!hw)
		return -ENOTSUP;

	/* Setup and write the read command */
	command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
		  (dev_type << IXGBE_MSCA_PHY_ADDR_SHIFT) |
		  IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC |
		  IXGBE_MSCA_MDI_COMMAND;

	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

	/* Check every 10 usec to see if the access completed.
	 * The MDI Command bit will clear when the operation is
	 * complete.
	 */
	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
		usec_delay(10);

		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
		if (!(command & IXGBE_MSCA_MDI_COMMAND))
			break;
	}
	if (command & IXGBE_MSCA_MDI_COMMAND)
		return IXGBE_ERR_PHY;

	/* Read operation is complete. Get the data from MSRWD */
	data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
	data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
	*phy_data = (u16)data;

	return 0;
}

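/*
 * Editorial sketch: the "unlocked" accessors assume the caller already
 * holds the MDIO lock, so a safe single read looks like this (error
 * handling elided; reg_addr and dev_type are placeholders):
 *
 *	uint16_t val;
 *
 *	rte_pmd_ixgbe_mdio_lock(port);
 *	rte_pmd_ixgbe_mdio_unlocked_read(port, reg_addr, dev_type, &val);
 *	rte_pmd_ixgbe_mdio_unlock(port);
 */
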
int
rte_pmd_ixgbe_mdio_unlocked_write(uint16_t port, uint32_t reg_addr,
				  uint32_t dev_type, uint16_t phy_data)
{
	struct ixgbe_hw *hw;
	u32 i, command;
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
	dev = &rte_eth_devices[port];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (!hw)
		return -ENOTSUP;

	/* Put the data in the MDI single read and write data register */
	IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);

	/* Setup and write the write command */
	command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
		  (dev_type << IXGBE_MSCA_PHY_ADDR_SHIFT) |
		  IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
		  IXGBE_MSCA_MDI_COMMAND;

	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

	/* Check every 10 usec to see if the access completed.
	 * The MDI Command bit will clear when the operation is
	 * complete.
	 */
	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
		usec_delay(10);

		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
		if (!(command & IXGBE_MSCA_MDI_COMMAND))
			break;
	}
	if (command & IXGBE_MSCA_MDI_COMMAND) {
		ERROR_REPORT1(IXGBE_ERROR_POLLING,
			      "PHY write cmd didn't complete\n");
		return IXGBE_ERR_PHY;
	}
	return 0;
}