/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <rte_ethdev_driver.h>

#include "base/ixgbe_api.h"
#include "ixgbe_ethdev.h"
#include "rte_pmd_ixgbe.h"

int
rte_pmd_ixgbe_set_vf_mac_addr(uint16_t port, uint16_t vf,
			      struct ether_addr *mac_addr)
{
	struct ixgbe_hw *hw;
	struct ixgbe_vf_info *vfinfo;
	int rar_entry;
	uint8_t *new_mac = (uint8_t *)(mac_addr);
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	rar_entry = hw->mac.num_rar_entries - (vf + 1);

	if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
		rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
			   ETHER_ADDR_LEN);
		return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
					   IXGBE_RAH_AV);
	}

	return -EINVAL;
}

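/*
 * Illustrative usage sketch (not part of the driver; port id and address
 * below are made-up example values): an application that owns the PF can
 * pin a locally administered MAC to VF 0 like this.
 *
 *	struct ether_addr vf_mac = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *	};
 *
 *	if (rte_pmd_ixgbe_set_vf_mac_addr(0, 0, &vf_mac) != 0)
 *		printf("failed to set VF MAC\n");
 *
 * The address must pass is_valid_assigned_ether_addr(), i.e. be a unicast,
 * non-zero address, otherwise the call returns -EINVAL.
 */
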
int
rte_pmd_ixgbe_ping_vf(uint16_t port, uint16_t vf)
{
	struct ixgbe_hw *hw;
	struct ixgbe_vf_info *vfinfo;
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;
	uint32_t ctrl;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));

	ctrl = IXGBE_PF_CONTROL_MSG;
	if (vfinfo[vf].clear_to_send)
		ctrl |= IXGBE_VT_MSGTYPE_CTS;

	ixgbe_write_mbx(hw, &ctrl, 1, vf);

	return 0;
}

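/*
 * Illustrative sketch (example values, not part of the driver): the PF can
 * nudge all of its VFs through the mailbox, e.g. after a link change. The
 * CTS flag is only added when the VF has completed its reset handshake
 * (clear_to_send), which this function handles internally.
 *
 *	uint16_t vf;
 *
 *	for (vf = 0; vf < nb_vfs; vf++)
 *		rte_pmd_ixgbe_ping_vf(port_id, vf);
 *
 * Here port_id and nb_vfs are assumed to come from the application.
 */
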
int
rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
{
	struct ixgbe_hw *hw;
	struct ixgbe_mac_info *mac;
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	if (on > 1)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	mac = &hw->mac;

	mac->ops.set_vlan_anti_spoofing(hw, on, vf);

	return 0;
}

int
rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
{
	struct ixgbe_hw *hw;
	struct ixgbe_mac_info *mac;
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	if (on > 1)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	mac = &hw->mac;

	mac->ops.set_mac_anti_spoofing(hw, on, vf);

	return 0;
}

int
rte_pmd_ixgbe_set_vf_vlan_insert(uint16_t port, uint16_t vf, uint16_t vlan_id)
{
	struct ixgbe_hw *hw;
	uint32_t ctrl;
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	if (vlan_id > ETHER_MAX_VLAN_ID)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	ctrl = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
	if (vlan_id) {
		ctrl = vlan_id;
		ctrl |= IXGBE_VMVIR_VLANA_DEFAULT;
	} else {
		ctrl = 0;
	}

	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), ctrl);

	return 0;
}

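/*
 * Illustrative sketch (example values): insert VLAN 100 into every frame
 * transmitted by VF 2, then disable insertion again by passing VLAN id 0.
 *
 *	rte_pmd_ixgbe_set_vf_vlan_insert(port_id, 2, 100);
 *	...
 *	rte_pmd_ixgbe_set_vf_vlan_insert(port_id, 2, 0);
 *
 * A non-zero id programs VMVIR with the VLAN and the "use default VLAN"
 * flag; zero clears the register and stops insertion.
 */
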
int
rte_pmd_ixgbe_set_tx_loopback(uint16_t port, uint8_t on)
{
	struct ixgbe_hw *hw;
	uint32_t ctrl;
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (on > 1)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	ctrl = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
	/* enable or disable VMDQ loopback */
	if (on)
		ctrl |= IXGBE_PFDTXGSWC_VT_LBEN;
	else
		ctrl &= ~IXGBE_PFDTXGSWC_VT_LBEN;

	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, ctrl);

	return 0;
}

int
rte_pmd_ixgbe_set_all_queues_drop_en(uint16_t port, uint8_t on)
{
	struct ixgbe_hw *hw;
	uint32_t reg_value;
	int i;
	int num_queues = (int)(IXGBE_QDE_IDX_MASK >> IXGBE_QDE_IDX_SHIFT);
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (on > 1)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	for (i = 0; i <= num_queues; i++) {
		reg_value = IXGBE_QDE_WRITE |
			    (i << IXGBE_QDE_IDX_SHIFT) |
			    (on & IXGBE_QDE_ENABLE);
		IXGBE_WRITE_REG(hw, IXGBE_QDE, reg_value);
	}

	return 0;
}

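/*
 * Worked example (values follow from the loop above, not measured on
 * hardware): with on == 1, iteration i == 5 writes
 *
 *	reg_value = IXGBE_QDE_WRITE | (5 << IXGBE_QDE_IDX_SHIFT) | 1
 *
 * to IXGBE_QDE, i.e. "write-enable, queue index 5, drop enable set". The
 * loop repeats this for every queue index covered by IXGBE_QDE_IDX_MASK,
 * so all RX queues end up with the same drop policy.
 */
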
int
rte_pmd_ixgbe_set_vf_split_drop_en(uint16_t port, uint16_t vf, uint8_t on)
{
	struct ixgbe_hw *hw;
	uint32_t reg_value;
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	/* only support VFs 0 to 63 */
	if ((vf >= pci_dev->max_vfs) || (vf > 63))
		return -EINVAL;

	if (on > 1)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	reg_value = IXGBE_READ_REG(hw, IXGBE_SRRCTL(vf));
	if (on)
		reg_value |= IXGBE_SRRCTL_DROP_EN;
	else
		reg_value &= ~IXGBE_SRRCTL_DROP_EN;

	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(vf), reg_value);

	return 0;
}

int
rte_pmd_ixgbe_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
{
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;
	struct ixgbe_hw *hw;
	uint16_t queues_per_pool;
	uint32_t q;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	if (on > 1)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);

	/* The PF has 128 queue pairs and in SRIOV configuration
	 * those queues will be assigned to VFs, so RXDCTL
	 * registers will be dealing with queues which will be
	 * assigned to VFs.
	 * Let's say we have SRIOV configured with 31 VFs: then the
	 * first 124 queues 0-123 will be allocated to VFs and only
	 * the last 4 queues 124-127 will be assigned to the PF.
	 */
	if (hw->mac.type == ixgbe_mac_82598EB)
		queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
				  ETH_16_POOLS;
	else
		queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
				  ETH_64_POOLS;

	for (q = 0; q < queues_per_pool; q++)
		(*dev->dev_ops->vlan_strip_queue_set)(dev,
				q + vf * queues_per_pool, on);

	return 0;
}

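/*
 * Worked example of the pool arithmetic above (assuming a non-82598 device
 * with 128 RX queues): queues_per_pool = 128 / ETH_64_POOLS = 2, so for
 * VF 3 the loop toggles VLAN stripping on queues 6 and 7
 * (q + vf * queues_per_pool for q = 0, 1). On 82598 the divisor is
 * ETH_16_POOLS instead.
 */
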
int
rte_pmd_ixgbe_set_vf_rxmode(uint16_t port, uint16_t vf,
			    uint16_t rx_mask, uint8_t on)
{
	int val = 0;
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;
	struct ixgbe_hw *hw;
	uint32_t vmolr;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	if (on > 1)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	if (hw->mac.type == ixgbe_mac_82598EB) {
		PMD_INIT_LOG(ERR, "Setting VF receive mode should be done"
			     " on 82599 hardware and newer");
		return -ENOTSUP;
	}
	if (ixgbe_vt_check(hw) < 0)
		return -ENOTSUP;

	val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);

	if (on)
		vmolr |= val;
	else
		vmolr &= ~val;

	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	return 0;
}

int
rte_pmd_ixgbe_set_vf_rx(uint16_t port, uint16_t vf, uint8_t on)
{
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;
	uint32_t reg, addr;
	uint32_t val;
	const uint8_t bit1 = 0x1;
	struct ixgbe_hw *hw;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	if (on > 1)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (ixgbe_vt_check(hw) < 0)
		return -ENOTSUP;

	/* for vf >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */
	if (vf >= 32) {
		addr = IXGBE_VFRE(1);
		val = bit1 << (vf - 32);
	} else {
		addr = IXGBE_VFRE(0);
		val = bit1 << vf;
	}

	reg = IXGBE_READ_REG(hw, addr);

	if (on)
		reg |= val;
	else
		reg &= ~val;

	IXGBE_WRITE_REG(hw, addr, reg);

	return 0;
}

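/*
 * Worked example of the register/bit selection above (example VF number):
 * for vf == 35 the code picks addr = IXGBE_VFRE(1) and
 * val = 1 << (35 - 32) = 0x8, so bit 3 of the second PFVFRE register is
 * set or cleared. For vf == 7 it would be bit 7 of IXGBE_VFRE(0). The TX
 * variant below does the same with the PFVFTE registers.
 */
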
int
rte_pmd_ixgbe_set_vf_tx(uint16_t port, uint16_t vf, uint8_t on)
{
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;
	uint32_t reg, addr;
	uint32_t val;
	const uint8_t bit1 = 0x1;
	struct ixgbe_hw *hw;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	if (on > 1)
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (ixgbe_vt_check(hw) < 0)
		return -ENOTSUP;

	/* for vf >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */
	if (vf >= 32) {
		addr = IXGBE_VFTE(1);
		val = bit1 << (vf - 32);
	} else {
		addr = IXGBE_VFTE(0);
		val = bit1 << vf;
	}

	reg = IXGBE_READ_REG(hw, addr);

	if (on)
		reg |= val;
	else
		reg &= ~val;

	IXGBE_WRITE_REG(hw, addr, reg);

	return 0;
}

int
rte_pmd_ixgbe_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
				 uint64_t vf_mask, uint8_t vlan_on)
{
	struct rte_eth_dev *dev;
	int ret = 0;
	uint16_t vf_idx;
	struct ixgbe_hw *hw;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if ((vlan > ETHER_MAX_VLAN_ID) || (vf_mask == 0))
		return -EINVAL;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (ixgbe_vt_check(hw) < 0)
		return -ENOTSUP;

	for (vf_idx = 0; vf_idx < 64; vf_idx++) {
		if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
			ret = hw->mac.ops.set_vfta(hw, vlan, vf_idx,
						   vlan_on, false);
			if (ret < 0)
				return ret;
		}
	}

	return ret;
}

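/*
 * Illustrative sketch (example values): vf_mask is a 64-bit bitmap with one
 * bit per VF. To add VLAN 42 to the filter of VFs 0, 1 and 5:
 *
 *	uint64_t vf_mask = (1ULL << 0) | (1ULL << 1) | (1ULL << 5);
 *
 *	rte_pmd_ixgbe_set_vf_vlan_filter(port_id, 42, vf_mask, 1);
 *
 * Passing vlan_on == 0 with the same mask removes the filter again.
 */
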
int
rte_pmd_ixgbe_set_vf_rate_limit(uint16_t port, uint16_t vf,
				uint16_t tx_rate, uint64_t q_msk)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	return ixgbe_set_vf_rate_limit(dev, vf, tx_rate, q_msk);
}

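/*
 * Illustrative sketch (example values): cap VF 1 to 1000 Mbps on its first
 * queue only. q_msk is a per-VF queue bitmap, so 0x1 selects queue 0 of
 * that VF; the actual rate programming is done by ixgbe_set_vf_rate_limit().
 *
 *	rte_pmd_ixgbe_set_vf_rate_limit(port_id, 1, 1000, 0x1);
 */
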
int
rte_pmd_ixgbe_macsec_enable(uint16_t port, uint8_t en, uint8_t rp)
{
	struct ixgbe_hw *hw;
	struct rte_eth_dev *dev;
	uint32_t ctrl;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Stop the data paths */
	if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
		return -ENOTSUP;
	/**
	 * Workaround:
	 * No ixgbe_disable_sec_rx_path equivalent is
	 * implemented for tx in the base code, and we are
	 * not allowed to modify the base code in DPDK, so
	 * just call the hand-written one directly for now.
	 * The hardware support has been checked by
	 * ixgbe_disable_sec_rx_path().
	 */
	ixgbe_disable_sec_tx_path_generic(hw);

	/* Enable Ethernet CRC (required by MACsec offload) */
	ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl);

	/* Enable the TX and RX crypto engines */
	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);

	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);

	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK;
	ctrl |= 0x3;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl);

	/* Enable SA lookup */
	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
	ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT :
		     IXGBE_LSECTXCTRL_AUTH;
	ctrl |= IXGBE_LSECTXCTRL_AISCI;
	ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK;
	ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK;
	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);

	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
	ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT;
	ctrl &= ~IXGBE_LSECRXCTRL_PLSH;
	if (rp)
		ctrl |= IXGBE_LSECRXCTRL_RP;
	else
		ctrl &= ~IXGBE_LSECRXCTRL_RP;
	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);

	/* Start the data paths */
	ixgbe_enable_sec_rx_path(hw);
	/**
	 * Workaround:
	 * No ixgbe_enable_sec_rx_path equivalent is
	 * implemented for tx in the base code, and we are
	 * not allowed to modify the base code in DPDK, so
	 * just call the hand-written one directly for now.
	 */
	ixgbe_enable_sec_tx_path_generic(hw);

	return 0;
}

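/*
 * Illustrative sketch (example values): the two parameters select the TX SA
 * mode and RX replay protection. en == 1 programs the TX SA for
 * authentication plus encryption, en == 0 for authentication only; rp == 1
 * turns on replay protection on the RX side.
 *
 *	rte_pmd_ixgbe_macsec_enable(port_id, 1, 1);   // encrypt + replay protect
 *	rte_pmd_ixgbe_macsec_enable(port_id, 0, 0);   // integrity only
 */
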
int
rte_pmd_ixgbe_macsec_disable(uint16_t port)
{
	struct ixgbe_hw *hw;
	struct rte_eth_dev *dev;
	uint32_t ctrl;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Stop the data paths */
	if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
		return -ENOTSUP;
	/**
	 * Workaround:
	 * No ixgbe_disable_sec_rx_path equivalent is
	 * implemented for tx in the base code, and we are
	 * not allowed to modify the base code in DPDK, so
	 * just call the hand-written one directly for now.
	 * The hardware support has been checked by
	 * ixgbe_disable_sec_rx_path().
	 */
	ixgbe_disable_sec_tx_path_generic(hw);

	/* Disable the TX and RX crypto engines */
	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);

	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);

	/* Disable SA lookup */
	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
	ctrl |= IXGBE_LSECTXCTRL_DISABLE;
	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);

	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
	ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);

	/* Start the data paths */
	ixgbe_enable_sec_rx_path(hw);
	/**
	 * Workaround:
	 * No ixgbe_enable_sec_rx_path equivalent is
	 * implemented for tx in the base code, and we are
	 * not allowed to modify the base code in DPDK, so
	 * just call the hand-written one directly for now.
	 */
	ixgbe_enable_sec_tx_path_generic(hw);

	return 0;
}

int
rte_pmd_ixgbe_macsec_config_txsc(uint16_t port, uint8_t *mac)
{
	struct ixgbe_hw *hw;
	struct rte_eth_dev *dev;
	uint32_t ctrl;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCL, ctrl);

	ctrl = mac[4] | (mac[5] << 8);
	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCH, ctrl);

	return 0;
}

int
rte_pmd_ixgbe_macsec_config_rxsc(uint16_t port, uint8_t *mac, uint16_t pi)
{
	struct ixgbe_hw *hw;
	struct rte_eth_dev *dev;
	uint32_t ctrl;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCL, ctrl);

	pi = rte_cpu_to_be_16(pi);
	ctrl = mac[4] | (mac[5] << 8) | (pi << 16);
	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCH, ctrl);

	return 0;
}

int
rte_pmd_ixgbe_macsec_select_txsa(uint16_t port, uint8_t idx, uint8_t an,
				 uint32_t pn, uint8_t *key)
{
	struct ixgbe_hw *hw;
	struct rte_eth_dev *dev;
	uint32_t ctrl, i;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (idx != 0 && idx != 1)
		return -EINVAL;

	if (an >= 4)
		return -EINVAL;

	/* Set the PN and key */
	pn = rte_cpu_to_be_32(pn);
	if (idx == 0) {
		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN0, pn);

		for (i = 0; i < 4; i++) {
			ctrl = (key[i * 4 + 0] <<  0) |
			       (key[i * 4 + 1] <<  8) |
			       (key[i * 4 + 2] << 16) |
			       (key[i * 4 + 3] << 24);
			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY0(i), ctrl);
		}
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN1, pn);

		for (i = 0; i < 4; i++) {
			ctrl = (key[i * 4 + 0] <<  0) |
			       (key[i * 4 + 1] <<  8) |
			       (key[i * 4 + 2] << 16) |
			       (key[i * 4 + 3] << 24);
			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY1(i), ctrl);
		}
	}

	/* Set AN and select the SA */
	ctrl = (an << idx * 2) | (idx << 4);
	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSA, ctrl);

	return 0;
}

int
rte_pmd_ixgbe_macsec_select_rxsa(uint16_t port, uint8_t idx, uint8_t an,
				 uint32_t pn, uint8_t *key)
{
	struct ixgbe_hw *hw;
	struct rte_eth_dev *dev;
	uint32_t ctrl, i;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (idx != 0 && idx != 1)
		return -EINVAL;

	if (an >= 4)
		return -EINVAL;

	/* Set the PN */
	pn = rte_cpu_to_be_32(pn);
	IXGBE_WRITE_REG(hw, IXGBE_LSECRXPN(idx), pn);

	/* Set the key */
	for (i = 0; i < 4; i++) {
		ctrl = (key[i * 4 + 0] <<  0) |
		       (key[i * 4 + 1] <<  8) |
		       (key[i * 4 + 2] << 16) |
		       (key[i * 4 + 3] << 24);
		IXGBE_WRITE_REG(hw, IXGBE_LSECRXKEY(idx, i), ctrl);
	}

	/* Set the AN and validate the SA */
	ctrl = an | (1 << 2);
	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSA(idx), ctrl);

	return 0;
}

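/*
 * Illustrative provisioning sketch (example values, not part of the driver):
 * a typical sequence enables the offload, configures the TX/RX secure
 * channels and then installs one SA per direction. The MAC addresses, port
 * identifier, packet numbers and key below are placeholders supplied by the
 * application.
 *
 *	uint8_t local_mac[6] = { 0x02, 0, 0, 0, 0, 1 };
 *	uint8_t peer_mac[6]  = { 0x02, 0, 0, 0, 0, 2 };
 *	uint8_t key[16] = { 0 };   // 128-bit SAK from the key agreement
 *
 *	rte_pmd_ixgbe_macsec_enable(port_id, 1, 1);
 *	rte_pmd_ixgbe_macsec_config_txsc(port_id, local_mac);
 *	rte_pmd_ixgbe_macsec_config_rxsc(port_id, peer_mac, 1);
 *	rte_pmd_ixgbe_macsec_select_txsa(port_id, 0, 0, 1, key);
 *	rte_pmd_ixgbe_macsec_select_rxsa(port_id, 0, 0, 1, key);
 */
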
int
rte_pmd_ixgbe_set_tc_bw_alloc(uint16_t port,
			      uint8_t tc_num,
			      uint8_t *bw_weight)
{
	struct rte_eth_dev *dev;
	struct ixgbe_dcb_config *dcb_config;
	struct ixgbe_dcb_tc_config *tc;
	struct rte_eth_conf *eth_conf;
	struct ixgbe_bw_conf *bw_conf;
	uint8_t i;
	uint8_t nb_tcs;
	uint16_t sum;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	if (tc_num > IXGBE_DCB_MAX_TRAFFIC_CLASS) {
		PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
			    IXGBE_DCB_MAX_TRAFFIC_CLASS);
		return -EINVAL;
	}

	dcb_config = IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
	bw_conf = IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
	eth_conf = &dev->data->dev_conf;

	if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
	} else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
		    ETH_32_POOLS)
			nb_tcs = ETH_4_TCS;
		else
			nb_tcs = ETH_8_TCS;
	} else {
		nb_tcs = 1;
	}

	if (nb_tcs != tc_num) {
		PMD_DRV_LOG(ERR,
			    "Weight should be set for all %d enabled TCs.",
			    nb_tcs);
		return -EINVAL;
	}

	sum = 0;
	for (i = 0; i < nb_tcs; i++)
		sum += bw_weight[i];
	if (sum != 100) {
		PMD_DRV_LOG(ERR,
			    "The sum of all TC weights should be 100.");
		return -EINVAL;
	}

	for (i = 0; i < nb_tcs; i++) {
		tc = &dcb_config->tc_config[i];
		tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = bw_weight[i];
	}
	for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		tc = &dcb_config->tc_config[i];
		tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
	}

	bw_conf->tc_num = nb_tcs;

	return 0;
}

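/*
 * Illustrative sketch (example values): with 4 TCs enabled in DCB mode the
 * per-TC weights must cover all enabled TCs and sum to exactly 100, e.g.
 *
 *	uint8_t bw[4] = { 10, 20, 30, 40 };
 *
 *	rte_pmd_ixgbe_set_tc_bw_alloc(port_id, 4, bw);
 *
 * Any tc_num other than the number of TCs configured on the port, or a sum
 * different from 100, is rejected with -EINVAL.
 */
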
#ifdef RTE_LIBRTE_IXGBE_BYPASS
int
rte_pmd_ixgbe_bypass_init(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	ixgbe_bypass_init(dev);
	return 0;
}

int
rte_pmd_ixgbe_bypass_state_show(uint16_t port_id, uint32_t *state)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	return ixgbe_bypass_state_show(dev, state);
}

int
rte_pmd_ixgbe_bypass_state_set(uint16_t port_id, uint32_t *new_state)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	return ixgbe_bypass_state_store(dev, new_state);
}

int
rte_pmd_ixgbe_bypass_event_show(uint16_t port_id,
				uint32_t event,
				uint32_t *state)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	return ixgbe_bypass_event_show(dev, event, state);
}

int
rte_pmd_ixgbe_bypass_event_store(uint16_t port_id,
				 uint32_t event,
				 uint32_t state)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	return ixgbe_bypass_event_store(dev, event, state);
}

int
rte_pmd_ixgbe_bypass_wd_timeout_store(uint16_t port_id, uint32_t timeout)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	return ixgbe_bypass_wd_timeout_store(dev, timeout);
}

int
rte_pmd_ixgbe_bypass_ver_show(uint16_t port_id, uint32_t *ver)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	return ixgbe_bypass_ver_show(dev, ver);
}

int
rte_pmd_ixgbe_bypass_wd_timeout_show(uint16_t port_id, uint32_t *wd_timeout)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	return ixgbe_bypass_wd_timeout_show(dev, wd_timeout);
}

int
rte_pmd_ixgbe_bypass_wd_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	return ixgbe_bypass_wd_reset(dev);
}
#endif
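
/*
 * Illustrative sketch (example values, assuming the RTE_PMD_IXGBE_BYPASS_TMT_*
 * timeout constants declared in rte_pmd_ixgbe.h): arm the bypass watchdog
 * and then kick it periodically from the application's main loop so the
 * adapter does not fall into bypass mode while the software is healthy.
 *
 *	rte_pmd_ixgbe_bypass_wd_timeout_store(port_id,
 *					      RTE_PMD_IXGBE_BYPASS_TMT_4_SEC);
 *	...
 *	rte_pmd_ixgbe_bypass_wd_reset(port_id);   // call regularly, e.g. every second
 */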