#include "txgbe_mbx.h"
+/**
+ * txgbe_read_mbx - Reads a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns 0 if it successfully read message from buffer
+ **/
+s32 txgbe_read_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct txgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = TXGBE_ERR_MBX;
+
+ DEBUGFUNC("txgbe_read_mbx");
+
+ /* limit read to size of mailbox */
+ if (size > mbx->size)
+ size = mbx->size;
+
+ if (mbx->read)
+ ret_val = mbx->read(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * txgbe_write_mbx - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns 0 if it successfully copied message into the buffer
+ **/
+s32 txgbe_write_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct txgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = 0;
+
+ DEBUGFUNC("txgbe_write_mbx");
+
+ if (size > mbx->size) {
+ ret_val = TXGBE_ERR_MBX;
+ DEBUGOUT("Invalid mailbox message size %d", size);
+ } else if (mbx->write) {
+ ret_val = mbx->write(hw, msg, size, mbx_id);
+ }
+
+ return ret_val;
+}
+
+/**
+ * txgbe_check_for_msg - checks to see if someone sent us mail
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns 0 if the Status bit was found or else ERR_MBX
+ **/
+s32 txgbe_check_for_msg(struct txgbe_hw *hw, u16 mbx_id)
+{
+ struct txgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = TXGBE_ERR_MBX;
+
+ DEBUGFUNC("txgbe_check_for_msg");
+
+ if (mbx->check_for_msg)
+ ret_val = mbx->check_for_msg(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * txgbe_check_for_ack - checks to see if someone sent us ACK
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns 0 if the Status bit was found or else ERR_MBX
+ **/
+s32 txgbe_check_for_ack(struct txgbe_hw *hw, u16 mbx_id)
+{
+ struct txgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = TXGBE_ERR_MBX;
+
+ DEBUGFUNC("txgbe_check_for_ack");
+
+ if (mbx->check_for_ack)
+ ret_val = mbx->check_for_ack(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * txgbe_check_for_rst - checks to see if other side has reset
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns 0 if the Status bit was found or else ERR_MBX
+ **/
+s32 txgbe_check_for_rst(struct txgbe_hw *hw, u16 mbx_id)
+{
+ struct txgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = TXGBE_ERR_MBX;
+
+ DEBUGFUNC("txgbe_check_for_rst");
+
+ if (mbx->check_for_rst)
+ ret_val = mbx->check_for_rst(hw, mbx_id);
+
+ return ret_val;
+}
+
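+/**
+ * txgbe_check_for_bit_pf - check for and clear a PF mailbox event bit
+ * @hw: pointer to the HW structure
+ * @mask: MBVFICR bit(s) to check
+ * @index: MBVFICR register index covering this VF
+ *
+ * Reads MBVFICR and, if any bit in @mask is set, writes the mask back to
+ * clear it. Returns 0 if a bit in @mask was set, or else ERR_MBX.
+ **/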
+STATIC s32 txgbe_check_for_bit_pf(struct txgbe_hw *hw, u32 mask, s32 index)
+{
+ u32 mbvficr = rd32(hw, TXGBE_MBVFICR(index));
+ s32 ret_val = TXGBE_ERR_MBX;
+
+ if (mbvficr & mask) {
+ ret_val = 0;
+ wr32(hw, TXGBE_MBVFICR(index), mask);
+ }
+
+ return ret_val;
+}
+
+/**
+ * txgbe_check_for_msg_pf - checks to see if the VF has sent mail
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns 0 if the VF has set the Status bit or else ERR_MBX
+ **/
+s32 txgbe_check_for_msg_pf(struct txgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = TXGBE_ERR_MBX;
+ s32 index = TXGBE_MBVFICR_INDEX(vf_number);
+ u32 vf_bit = vf_number % 16;
+
+ DEBUGFUNC("txgbe_check_for_msg_pf");
+
+ if (!txgbe_check_for_bit_pf(hw, TXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
+ index)) {
+ ret_val = 0;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * txgbe_check_for_ack_pf - checks to see if the VF has ACKed
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns 0 if the VF has set the Status bit or else ERR_MBX
+ **/
+s32 txgbe_check_for_ack_pf(struct txgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = TXGBE_ERR_MBX;
+ s32 index = TXGBE_MBVFICR_INDEX(vf_number);
+ u32 vf_bit = vf_number % 16;
+
+ DEBUGFUNC("txgbe_check_for_ack_pf");
+
+ if (!txgbe_check_for_bit_pf(hw, TXGBE_MBVFICR_VFACK_VF1 << vf_bit,
+ index)) {
+ ret_val = 0;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * txgbe_check_for_rst_pf - checks to see if the VF has reset
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns 0 if the VF has set the Status bit or else ERR_MBX
+ **/
+s32 txgbe_check_for_rst_pf(struct txgbe_hw *hw, u16 vf_number)
+{
+ u32 reg_offset = (vf_number < 32) ? 0 : 1;
+ u32 vf_shift = vf_number % 32;
+ u32 vflre = 0;
+ s32 ret_val = TXGBE_ERR_MBX;
+
+ DEBUGFUNC("txgbe_check_for_rst_pf");
+
+ vflre = rd32(hw, TXGBE_FLRVFE(reg_offset));
+ if (vflre & (1 << vf_shift)) {
+ ret_val = 0;
+ wr32(hw, TXGBE_FLRVFEC(reg_offset), (1 << vf_shift));
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * txgbe_obtain_mbx_lock_pf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns 0 if we obtained the mailbox lock
+ **/
+STATIC s32 txgbe_obtain_mbx_lock_pf(struct txgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = TXGBE_ERR_MBX;
+ u32 p2v_mailbox;
+
+ DEBUGFUNC("txgbe_obtain_mbx_lock_pf");
+
+ /* Take ownership of the buffer */
+ wr32(hw, TXGBE_MBCTL(vf_number), TXGBE_MBCTL_PFU);
+
+ /* verify we obtained the lock by reading the PFU bit back */
+ p2v_mailbox = rd32(hw, TXGBE_MBCTL(vf_number));
+ if (p2v_mailbox & TXGBE_MBCTL_PFU)
+ ret_val = 0;
+ else
+ DEBUGOUT("Failed to obtain mailbox lock for VF%d", vf_number);
+
+ return ret_val;
+}
+
+/**
+ * txgbe_write_mbx_pf - Places a message in the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * returns 0 if it successfully copied message into the buffer
+ **/
+s32 txgbe_write_mbx_pf(struct txgbe_hw *hw, u32 *msg, u16 size, u16 vf_number)
+{
+ s32 ret_val;
+ u16 i;
+
+ DEBUGFUNC("txgbe_write_mbx_pf");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = txgbe_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ txgbe_check_for_msg_pf(hw, vf_number);
+ txgbe_check_for_ack_pf(hw, vf_number);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ wr32a(hw, TXGBE_MBMEM(vf_number), i, msg[i]);
+
+ /* Interrupt VF to tell it a message has been sent and release buffer */
+ wr32(hw, TXGBE_MBCTL(vf_number), TXGBE_MBCTL_STS);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+out_no_write:
+ return ret_val;
+}
+
+/**
+ * txgbe_read_mbx_pf - Read a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * This function copies a message from the mailbox buffer to the caller's
+ * memory buffer. The presumption is that the caller knows that there was
+ * a message due to a VF request so no polling for message is needed.
+ **/
+s32 txgbe_read_mbx_pf(struct txgbe_hw *hw, u32 *msg, u16 size, u16 vf_number)
+{
+ s32 ret_val;
+ u16 i;
+
+ DEBUGFUNC("txgbe_read_mbx_pf");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = txgbe_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = rd32a(hw, TXGBE_MBMEM(vf_number), i);
+
+ /* Acknowledge the message and release buffer */
+ wr32(hw, TXGBE_MBCTL(vf_number), TXGBE_MBCTL_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return ret_val;
+}
+
/**
* txgbe_init_mbx_params_pf - set initial values for pf mailbox
* @hw: pointer to the HW structure
#include "txgbe_type.h"
-void txgbe_init_mbx_params_pf(struct txgbe_hw *);
+#define TXGBE_ERR_MBX -100
+
+/* If it's a TXGBE_VF_* msg then it originates in the VF and is sent to the
+ * PF. The reverse is true if it is TXGBE_PF_*.
+ * Message acknowledgements are the original message value OR'd with one
+ * of the flag bits in the top nibble (0xF0000000) defined below.
+ */
+/* Messages below or'd with this are the ACK */
+#define TXGBE_VT_MSGTYPE_ACK 0x80000000
+/* Messages below or'd with this are the NACK */
+#define TXGBE_VT_MSGTYPE_NACK 0x40000000
+/* Indicates that VF is still clear to send requests */
+#define TXGBE_VT_MSGTYPE_CTS 0x20000000
+
+#define TXGBE_VT_MSGINFO_SHIFT 16
+/* bits 23:16 are used for extra info for certain messages */
+#define TXGBE_VT_MSGINFO_MASK (0xFF << TXGBE_VT_MSGINFO_SHIFT)
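+
+/* For illustration: a TXGBE_VF_SET_MACVLAN request for filter index 2 is
+ * encoded as
+ *   msgbuf[0] = TXGBE_VF_SET_MACVLAN | (2 << TXGBE_VT_MSGINFO_SHIFT);
+ * the PF recovers the index with
+ *   (msgbuf[0] & TXGBE_VT_MSGINFO_MASK) >> TXGBE_VT_MSGINFO_SHIFT.
+ */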
+
+/* definitions to support mailbox API version negotiation */
+
+/*
+ * each element denotes a version of the API; existing numbers may not
+ * change; any additions must go at the end
+ */
+enum txgbe_pfvf_api_rev {
+ txgbe_mbox_api_null,
+ txgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */
+ txgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
+ txgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */
+ txgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
+ txgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
+ /* This value should always be last */
+ txgbe_mbox_api_unknown, /* indicates that API version is not known */
+};
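+
+/* Illustrative negotiation flow (see txgbe_negotiate_vf_api()): the VF
+ * sends { TXGBE_VF_API_NEGOTIATE, txgbe_mbox_api_13 } and the PF replies
+ * with the same message OR'd with TXGBE_VT_MSGTYPE_ACK on success, or
+ * with TXGBE_VT_MSGTYPE_NACK if the version is not supported.
+ */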
+
+/* mailbox API, legacy requests */
+#define TXGBE_VF_RESET 0x01 /* VF requests reset */
+#define TXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define TXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define TXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+
+/* mailbox API, version 1.0 VF requests */
+#define TXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+#define TXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
+#define TXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
+
+/* mailbox API, version 1.1 VF requests */
+#define TXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */
+
+/* mailbox API, version 1.2 VF requests */
+#define TXGBE_VF_GET_RETA 0x0a /* VF request for RETA */
+#define TXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */
+#define TXGBE_VF_UPDATE_XCAST_MODE 0x0c /* update VF mc/promisc mode */
+
+/* mode choices for TXGBE_VF_UPDATE_XCAST_MODE */
+enum txgbevf_xcast_modes {
+ TXGBEVF_XCAST_MODE_NONE = 0,
+ TXGBEVF_XCAST_MODE_MULTI,
+ TXGBEVF_XCAST_MODE_ALLMULTI,
+ TXGBEVF_XCAST_MODE_PROMISC,
+};
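+
+/* For example, a VF requesting all-multicast sends
+ *   { TXGBE_VF_UPDATE_XCAST_MODE, TXGBEVF_XCAST_MODE_ALLMULTI };
+ * txgbe_set_vf_mc_promisc() then rewrites the pool's POOLETHCTL bits to
+ * match the requested mode.
+ */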
+
+/* GET_QUEUES return data indices within the mailbox */
+#define TXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
+#define TXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */
+#define TXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */
+#define TXGBE_VF_DEF_QUEUE 4 /* Default queue offset */
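+
+/* A successful TXGBE_VF_GET_QUEUES reply is therefore five words long:
+ * word 0 carries the message type plus ACK/CTS flags, and words 1..4 are
+ * filled at the indices above (see txgbe_get_vf_queues()).
+ */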
+
+/* length of permanent address message returned from PF */
+#define TXGBE_VF_PERMADDR_MSG_LEN 4
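+
+/* Layout of that 4-word reply, as built by txgbe_vf_reset():
+ *   msgbuf[0]    = TXGBE_VF_RESET | TXGBE_VT_MSGTYPE_ACK
+ *   msgbuf[1..2] = 6-byte permanent MAC address
+ *   msgbuf[3]    = multicast filter type (hw->mac.mc_filter_type)
+ */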
+
+s32 txgbe_read_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id);
+s32 txgbe_write_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id);
+s32 txgbe_check_for_msg(struct txgbe_hw *hw, u16 mbx_id);
+s32 txgbe_check_for_ack(struct txgbe_hw *hw, u16 mbx_id);
+s32 txgbe_check_for_rst(struct txgbe_hw *hw, u16 mbx_id);
+void txgbe_init_mbx_params_pf(struct txgbe_hw *hw);
+
+s32 txgbe_read_mbx_pf(struct txgbe_hw *hw, u32 *msg, u16 size, u16 vf_number);
+s32 txgbe_write_mbx_pf(struct txgbe_hw *hw, u32 *msg, u16 size, u16 vf_number);
+s32 txgbe_check_for_msg_pf(struct txgbe_hw *hw, u16 vf_number);
+s32 txgbe_check_for_ack_pf(struct txgbe_hw *hw, u16 vf_number);
+s32 txgbe_check_for_rst_pf(struct txgbe_hw *hw, u16 vf_number);
#endif /* _TXGBE_MBX_H_ */
#include "base/txgbe.h"
#include "txgbe_ethdev.h"
+#include "rte_pmd_txgbe.h"
+
+#define TXGBE_VF_MSG_SIZE_DEFAULT 1
+#define TXGBE_VF_GET_QUEUE_MSG_SIZE 5
static inline uint16_t
dev_num_vf(struct rte_eth_dev *eth_dev)
*vfinfo = NULL;
}
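+/*
+ * Sync PF-level (PSRCTL) and PF-pool (POOLETHCTL) Rx filtering with the
+ * port's current promiscuous/all-multicast state, then refresh the VLAN
+ * strip configuration.
+ */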
+static void
+txgbe_set_rx_mode(struct rte_eth_dev *eth_dev)
+{
+ struct rte_eth_dev_data *dev_data = eth_dev->data;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ u32 fctrl, vmolr;
+ uint16_t vfn = dev_num_vf(eth_dev);
+
+ /* disable store-bad-packets */
+ wr32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_SAVEBAD, 0);
+
+ /* Check for Promiscuous and All Multicast modes */
+ fctrl = rd32m(hw, TXGBE_PSRCTL,
+ ~(TXGBE_PSRCTL_UCP | TXGBE_PSRCTL_MCP));
+ fctrl |= TXGBE_PSRCTL_BCA |
+ TXGBE_PSRCTL_MCHFENA;
+
+ vmolr = rd32m(hw, TXGBE_POOLETHCTL(vfn),
+ ~(TXGBE_POOLETHCTL_UCP |
+ TXGBE_POOLETHCTL_MCP |
+ TXGBE_POOLETHCTL_UCHA |
+ TXGBE_POOLETHCTL_MCHA));
+ vmolr |= TXGBE_POOLETHCTL_BCA |
+ TXGBE_POOLETHCTL_UTA |
+ TXGBE_POOLETHCTL_VLA;
+
+ if (dev_data->promiscuous) {
+ fctrl |= TXGBE_PSRCTL_UCP |
+ TXGBE_PSRCTL_MCP;
+ /* the PF doesn't want unicast routed to its pool, so leave UCP clear */
+ vmolr |= TXGBE_POOLETHCTL_MCP;
+ } else if (dev_data->all_multicast) {
+ fctrl |= TXGBE_PSRCTL_MCP;
+ vmolr |= TXGBE_POOLETHCTL_MCP;
+ } else {
+ vmolr |= TXGBE_POOLETHCTL_UCHA;
+ vmolr |= TXGBE_POOLETHCTL_MCHA;
+ }
+
+ wr32(hw, TXGBE_POOLETHCTL(vfn), vmolr);
+
+ wr32(hw, TXGBE_PSRCTL, fctrl);
+
+ txgbe_vlan_hw_strip_config(eth_dev);
+}
+
+static inline void
+txgbe_vf_reset_event(struct rte_eth_dev *eth_dev, uint16_t vf)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ struct txgbe_vf_info *vfinfo = *(TXGBE_DEV_VFDATA(eth_dev));
+ int rar_entry = hw->mac.num_rar_entries - (vf + 1);
+ uint32_t vmolr = rd32(hw, TXGBE_POOLETHCTL(vf));
+
+ vmolr |= (TXGBE_POOLETHCTL_UCHA |
+ TXGBE_POOLETHCTL_BCA | TXGBE_POOLETHCTL_UTA);
+ wr32(hw, TXGBE_POOLETHCTL(vf), vmolr);
+
+ wr32(hw, TXGBE_POOLTAG(vf), 0);
+
+ /* reset multicast table array for vf */
+ vfinfo[vf].num_vf_mc_hashes = 0;
+
+ /* reset rx mode */
+ txgbe_set_rx_mode(eth_dev);
+
+ hw->mac.clear_rar(hw, rar_entry);
+}
+
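+/*
+ * Complete a VF reset request: re-enable the VF's Tx and Rx pool bits,
+ * arm per-queue Rx drop across its queue range, and restore default
+ * filtering via txgbe_vf_reset_event().
+ */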
+static inline void
+txgbe_vf_reset_msg(struct rte_eth_dev *eth_dev, uint16_t vf)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ uint32_t reg;
+ uint32_t reg_offset, vf_shift;
+ const uint8_t VFRE_SHIFT = 5; /* VFRE 32 bits per slot */
+ const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
+ uint8_t nb_q_per_pool;
+ int i;
+
+ vf_shift = vf & VFRE_MASK;
+ reg_offset = (vf >> VFRE_SHIFT) > 0 ? 1 : 0;
+
+ /* enable transmit for vf */
+ reg = rd32(hw, TXGBE_POOLTXENA(reg_offset));
+ reg |= (1 << vf_shift);
+ wr32(hw, TXGBE_POOLTXENA(reg_offset), reg);
+
+ /* enable all queue drop for IOV */
+ nb_q_per_pool = RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool;
+ for (i = vf * nb_q_per_pool; i < (vf + 1) * nb_q_per_pool; i++) {
+ txgbe_flush(hw);
+ reg = 1 << (i % 32);
+ wr32m(hw, TXGBE_QPRXDROP(i / 32), reg, reg);
+ }
+
+ /* enable receive for vf */
+ reg = rd32(hw, TXGBE_POOLRXENA(reg_offset));
+ reg |= (1 << vf_shift);
+ wr32(hw, TXGBE_POOLRXENA(reg_offset), reg);
+
+ txgbe_vf_reset_event(eth_dev, vf);
+}
+
+static int
+txgbe_disable_vf_mc_promisc(struct rte_eth_dev *eth_dev, uint32_t vf)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ uint32_t vmolr;
+
+ vmolr = rd32(hw, TXGBE_POOLETHCTL(vf));
+
+ PMD_DRV_LOG(INFO, "VF %u: disabling multicast promiscuous\n", vf);
+
+ vmolr &= ~TXGBE_POOLETHCTL_MCP;
+
+ wr32(hw, TXGBE_POOLETHCTL(vf), vmolr);
+
+ return 0;
+}
+
+static int
+txgbe_vf_reset(struct rte_eth_dev *eth_dev, uint16_t vf, uint32_t *msgbuf)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ struct txgbe_vf_info *vfinfo = *(TXGBE_DEV_VFDATA(eth_dev));
+ unsigned char *vf_mac = vfinfo[vf].vf_mac_addresses;
+ int rar_entry = hw->mac.num_rar_entries - (vf + 1);
+ uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
+
+ txgbe_vf_reset_msg(eth_dev, vf);
+
+ hw->mac.set_rar(hw, rar_entry, vf_mac, vf, true);
+
+ /* Disable multicast promiscuous at reset */
+ txgbe_disable_vf_mc_promisc(eth_dev, vf);
+
+ /* reply to reset with ack and vf mac address */
+ msgbuf[0] = TXGBE_VF_RESET | TXGBE_VT_MSGTYPE_ACK;
+ rte_memcpy(new_mac, vf_mac, RTE_ETHER_ADDR_LEN);
+ /*
+ * Piggyback the multicast filter type so VF can compute the
+ * correct vectors
+ */
+ msgbuf[3] = hw->mac.mc_filter_type;
+ txgbe_write_mbx(hw, msgbuf, TXGBE_VF_PERMADDR_MSG_LEN, vf);
+
+ return 0;
+}
+
+static int
+txgbe_vf_set_mac_addr(struct rte_eth_dev *eth_dev,
+ uint32_t vf, uint32_t *msgbuf)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ struct txgbe_vf_info *vfinfo = *(TXGBE_DEV_VFDATA(eth_dev));
+ int rar_entry = hw->mac.num_rar_entries - (vf + 1);
+ uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
+ struct rte_ether_addr *ea = (struct rte_ether_addr *)new_mac;
+
+ if (rte_is_valid_assigned_ether_addr(ea)) {
+ rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, RTE_ETHER_ADDR_LEN);
+ return hw->mac.set_rar(hw, rar_entry, new_mac, vf, true);
+ }
+ return -1;
+}
+
+static int
+txgbe_vf_set_multicast(struct rte_eth_dev *eth_dev,
+ uint32_t vf, uint32_t *msgbuf)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ struct txgbe_vf_info *vfinfo = *(TXGBE_DEV_VFDATA(eth_dev));
+ int nb_entries = (msgbuf[0] & TXGBE_VT_MSGINFO_MASK) >>
+ TXGBE_VT_MSGINFO_SHIFT;
+ uint16_t *hash_list = (uint16_t *)&msgbuf[1];
+ uint32_t mta_idx;
+ uint32_t mta_shift;
+ const uint32_t TXGBE_MTA_INDEX_MASK = 0x7F;
+ const uint32_t TXGBE_MTA_BIT_SHIFT = 5;
+ const uint32_t TXGBE_MTA_BIT_MASK = (0x1 << TXGBE_MTA_BIT_SHIFT) - 1;
+ uint32_t reg_val;
+ int i;
+ u32 vmolr = rd32(hw, TXGBE_POOLETHCTL(vf));
+
+ /* Disable multicast promiscuous first */
+ txgbe_disable_vf_mc_promisc(eth_dev, vf);
+
+ /* only so many hash values supported */
+ nb_entries = RTE_MIN(nb_entries, TXGBE_MAX_VF_MC_ENTRIES);
+
+ /* store the mc entries */
+ vfinfo[vf].num_vf_mc_hashes = (uint16_t)nb_entries;
+ for (i = 0; i < nb_entries; i++)
+ vfinfo[vf].vf_mc_hashes[i] = hash_list[i];
+
+ if (nb_entries == 0) {
+ vmolr &= ~TXGBE_POOLETHCTL_MCHA;
+ wr32(hw, TXGBE_POOLETHCTL(vf), vmolr);
+ return 0;
+ }
+
+ for (i = 0; i < vfinfo[vf].num_vf_mc_hashes; i++) {
+ mta_idx = (vfinfo[vf].vf_mc_hashes[i] >> TXGBE_MTA_BIT_SHIFT)
+ & TXGBE_MTA_INDEX_MASK;
+ mta_shift = vfinfo[vf].vf_mc_hashes[i] & TXGBE_MTA_BIT_MASK;
+ reg_val = rd32(hw, TXGBE_MCADDRTBL(mta_idx));
+ reg_val |= (1 << mta_shift);
+ wr32(hw, TXGBE_MCADDRTBL(mta_idx), reg_val);
+ }
+
+ vmolr |= TXGBE_POOLETHCTL_MCHA;
+ wr32(hw, TXGBE_POOLETHCTL(vf), vmolr);
+
+ return 0;
+}
+
+static int
+txgbe_vf_set_vlan(struct rte_eth_dev *eth_dev, uint32_t vf, uint32_t *msgbuf)
+{
+ int add, vid;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ struct txgbe_vf_info *vfinfo = *(TXGBE_DEV_VFDATA(eth_dev));
+
+ add = (msgbuf[0] & TXGBE_VT_MSGINFO_MASK)
+ >> TXGBE_VT_MSGINFO_SHIFT;
+ vid = TXGBE_PSRVLAN_VID(msgbuf[1]);
+
+ if (add)
+ vfinfo[vf].vlan_count++;
+ else if (vfinfo[vf].vlan_count)
+ vfinfo[vf].vlan_count--;
+ return hw->mac.set_vfta(hw, vid, vf, (bool)add, false);
+}
+
+static int
+txgbe_set_vf_lpe(struct rte_eth_dev *eth_dev,
+ __rte_unused uint32_t vf, uint32_t *msgbuf)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ uint32_t max_frame = msgbuf[1];
+ uint32_t max_frs;
+
+ if (max_frame < RTE_ETHER_MIN_LEN ||
+ max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
+ return -1;
+
+ max_frs = rd32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK);
+ if (max_frs < max_frame) {
+ wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
+ TXGBE_FRMSZ_MAX(max_frame));
+ }
+
+ return 0;
+}
+
+static int
+txgbe_negotiate_vf_api(struct rte_eth_dev *eth_dev,
+ uint32_t vf, uint32_t *msgbuf)
+{
+ uint32_t api_version = msgbuf[1];
+ struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(eth_dev);
+
+ switch (api_version) {
+ case txgbe_mbox_api_10:
+ case txgbe_mbox_api_11:
+ case txgbe_mbox_api_12:
+ case txgbe_mbox_api_13:
+ vfinfo[vf].api_version = (uint8_t)api_version;
+ return 0;
+ default:
+ break;
+ }
+
+ PMD_DRV_LOG(ERR, "Negotiate invalid api version %u from VF %d\n",
+ api_version, vf);
+
+ return -1;
+}
+
+static int
+txgbe_get_vf_queues(struct rte_eth_dev *eth_dev, uint32_t vf, uint32_t *msgbuf)
+{
+ struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(eth_dev);
+ uint32_t default_q = vf * RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool;
+ struct rte_eth_conf *eth_conf;
+ struct rte_eth_vmdq_dcb_tx_conf *vmdq_dcb_tx_conf;
+ u8 num_tcs;
+ struct txgbe_hw *hw;
+ u32 vmvir;
+ u32 vlana;
+ u32 vid;
+ u32 user_priority;
+
+ /* Verify if the PF supports the mbox APIs version or not */
+ switch (vfinfo[vf].api_version) {
+ case txgbe_mbox_api_20:
+ case txgbe_mbox_api_11:
+ case txgbe_mbox_api_12:
+ case txgbe_mbox_api_13:
+ break;
+ default:
+ return -1;
+ }
+
+ /* Notify VF of Rx and Tx queue number */
+ msgbuf[TXGBE_VF_RX_QUEUES] = RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool;
+ msgbuf[TXGBE_VF_TX_QUEUES] = RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool;
+
+ /* Notify VF of default queue */
+ msgbuf[TXGBE_VF_DEF_QUEUE] = default_q;
+
+ /* Notify VF of number of DCB traffic classes */
+ eth_conf = &eth_dev->data->dev_conf;
+ switch (eth_conf->txmode.mq_mode) {
+ case ETH_MQ_TX_NONE:
+ case ETH_MQ_TX_DCB:
+ PMD_DRV_LOG(ERR, "PF must work with virtualization for VF %u"
+ ", but its tx mode = %d\n", vf,
+ eth_conf->txmode.mq_mode);
+ return -1;
+
+ case ETH_MQ_TX_VMDQ_DCB:
+ vmdq_dcb_tx_conf = &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
+ switch (vmdq_dcb_tx_conf->nb_queue_pools) {
+ case ETH_16_POOLS:
+ num_tcs = ETH_8_TCS;
+ break;
+ case ETH_32_POOLS:
+ num_tcs = ETH_4_TCS;
+ break;
+ default:
+ return -1;
+ }
+ break;
+
+ /* ETH_MQ_TX_VMDQ_ONLY, DCB not enabled */
+ case ETH_MQ_TX_VMDQ_ONLY:
+ hw = TXGBE_DEV_HW(eth_dev);
+ vmvir = rd32(hw, TXGBE_POOLTAG(vf));
+ vlana = vmvir & TXGBE_POOLTAG_ACT_MASK;
+ vid = vmvir & TXGBE_POOLTAG_VTAG_MASK;
+ user_priority = TXGBD_POOLTAG_VTAG_UP(vmvir);
+ if (vlana == TXGBE_POOLTAG_ACT_ALWAYS &&
+ (vid != 0 || user_priority != 0))
+ num_tcs = 1;
+ else
+ num_tcs = 0;
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "PF work with invalid mode = %d\n",
+ eth_conf->txmode.mq_mode);
+ return -1;
+ }
+ msgbuf[TXGBE_VF_TRANS_VLAN] = num_tcs;
+
+ return 0;
+}
+
+static int
+txgbe_set_vf_mc_promisc(struct rte_eth_dev *eth_dev,
+ uint32_t vf, uint32_t *msgbuf)
+{
+ struct txgbe_vf_info *vfinfo = *(TXGBE_DEV_VFDATA(eth_dev));
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ int xcast_mode = msgbuf[1]; /* requested xcast mode */
+ u32 vmolr, fctrl, disable, enable;
+
+ switch (vfinfo[vf].api_version) {
+ case txgbe_mbox_api_12:
+ /* promisc introduced in 1.3 version */
+ if (xcast_mode == TXGBEVF_XCAST_MODE_PROMISC)
+ return -EOPNOTSUPP;
+ break;
+ case txgbe_mbox_api_13:
+ break;
+ default:
+ return -1;
+ }
+
+ if (vfinfo[vf].xcast_mode == xcast_mode)
+ goto out;
+
+ switch (xcast_mode) {
+ case TXGBEVF_XCAST_MODE_NONE:
+ disable = TXGBE_POOLETHCTL_BCA | TXGBE_POOLETHCTL_MCHA |
+ TXGBE_POOLETHCTL_MCP | TXGBE_POOLETHCTL_UCP |
+ TXGBE_POOLETHCTL_VLP;
+ enable = 0;
+ break;
+ case TXGBEVF_XCAST_MODE_MULTI:
+ disable = TXGBE_POOLETHCTL_MCP | TXGBE_POOLETHCTL_UCP |
+ TXGBE_POOLETHCTL_VLP;
+ enable = TXGBE_POOLETHCTL_BCA | TXGBE_POOLETHCTL_MCHA;
+ break;
+ case TXGBEVF_XCAST_MODE_ALLMULTI:
+ disable = TXGBE_POOLETHCTL_UCP | TXGBE_POOLETHCTL_VLP;
+ enable = TXGBE_POOLETHCTL_BCA | TXGBE_POOLETHCTL_MCHA |
+ TXGBE_POOLETHCTL_MCP;
+ break;
+ case TXGBEVF_XCAST_MODE_PROMISC:
+ fctrl = rd32(hw, TXGBE_PSRCTL);
+ if (!(fctrl & TXGBE_PSRCTL_UCP)) {
+ /* VF promisc requires PF in promisc */
+ PMD_DRV_LOG(ERR,
+ "Enabling VF promisc requires PF in promisc\n");
+ return -1;
+ }
+
+ disable = 0;
+ enable = TXGBE_POOLETHCTL_BCA | TXGBE_POOLETHCTL_MCHA |
+ TXGBE_POOLETHCTL_MCP | TXGBE_POOLETHCTL_UCP |
+ TXGBE_POOLETHCTL_VLP;
+ break;
+ default:
+ return -1;
+ }
+
+ vmolr = rd32(hw, TXGBE_POOLETHCTL(vf));
+ vmolr &= ~disable;
+ vmolr |= enable;
+ wr32(hw, TXGBE_POOLETHCTL(vf), vmolr);
+ vfinfo[vf].xcast_mode = xcast_mode;
+
+out:
+ msgbuf[1] = xcast_mode;
+
+ return 0;
+}
+
+static int
+txgbe_set_vf_macvlan_msg(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_vf_info *vf_info = *(TXGBE_DEV_VFDATA(dev));
+ uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
+ struct rte_ether_addr *ea = (struct rte_ether_addr *)new_mac;
+ int index = (msgbuf[0] & TXGBE_VT_MSGINFO_MASK) >>
+ TXGBE_VT_MSGINFO_SHIFT;
+
+ if (index) {
+ if (!rte_is_valid_assigned_ether_addr(ea)) {
+ PMD_DRV_LOG(ERR, "set invalid mac vf:%d\n", vf);
+ return -1;
+ }
+
+ vf_info[vf].mac_count++;
+
+ hw->mac.set_rar(hw, vf_info[vf].mac_count,
+ new_mac, vf, true);
+ } else {
+ if (vf_info[vf].mac_count) {
+ hw->mac.clear_rar(hw, vf_info[vf].mac_count);
+ vf_info[vf].mac_count = 0;
+ }
+ }
+ return 0;
+}
+
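+/*
+ * Read one request from a VF's mailbox, give the application a chance to
+ * veto it via the RTE_ETH_EVENT_VF_MBOX callback, dispatch it to the
+ * matching handler, and answer with ACK/NACK plus the CTS flag.
+ */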
+static int
+txgbe_rcv_msg_from_vf(struct rte_eth_dev *eth_dev, uint16_t vf)
+{
+ uint16_t mbx_size = TXGBE_P2VMBX_SIZE;
+ uint16_t msg_size = TXGBE_VF_MSG_SIZE_DEFAULT;
+ uint32_t msgbuf[TXGBE_P2VMBX_SIZE];
+ int32_t retval;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(eth_dev);
+ struct rte_pmd_txgbe_mb_event_param ret_param;
+
+ retval = txgbe_read_mbx(hw, msgbuf, mbx_size, vf);
+ if (retval) {
+ PMD_DRV_LOG(ERR, "Error mbx recv msg from VF %d", vf);
+ return retval;
+ }
+
+ /* do nothing if the message has already been processed */
+ if (msgbuf[0] & (TXGBE_VT_MSGTYPE_ACK | TXGBE_VT_MSGTYPE_NACK))
+ return retval;
+
+ /* flush the ack before we write any messages back */
+ txgbe_flush(hw);
+
+ /**
+ * initialise the structure sent to the user application;
+ * the application's response is returned in the retval field
+ */
+ ret_param.retval = RTE_PMD_TXGBE_MB_EVENT_PROCEED;
+ ret_param.vfid = vf;
+ ret_param.msg_type = msgbuf[0] & 0xFFFF;
+ ret_param.msg = (void *)msgbuf;
+
+ /* perform VF reset */
+ if (msgbuf[0] == TXGBE_VF_RESET) {
+ int ret = txgbe_vf_reset(eth_dev, vf, msgbuf);
+
+ vfinfo[vf].clear_to_send = true;
+
+ /* notify application about VF reset */
+ rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_VF_MBOX,
+ &ret_param);
+ return ret;
+ }
+
+ /**
+ * ask the user application whether we may perform these functions:
+ * if ret_param.retval == RTE_PMD_TXGBE_MB_EVENT_PROCEED,
+ * proceed as usual;
+ * if it is 0, do nothing and send an ACK to the VF;
+ * if ret_param.retval > 1, do nothing and send a NACK to the VF
+ */
+ rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_VF_MBOX,
+ &ret_param);
+
+ retval = ret_param.retval;
+
+ /* check & process VF to PF mailbox message */
+ switch ((msgbuf[0] & 0xFFFF)) {
+ case TXGBE_VF_SET_MAC_ADDR:
+ if (retval == RTE_PMD_TXGBE_MB_EVENT_PROCEED)
+ retval = txgbe_vf_set_mac_addr(eth_dev, vf, msgbuf);
+ break;
+ case TXGBE_VF_SET_MULTICAST:
+ if (retval == RTE_PMD_TXGBE_MB_EVENT_PROCEED)
+ retval = txgbe_vf_set_multicast(eth_dev, vf, msgbuf);
+ break;
+ case TXGBE_VF_SET_LPE:
+ if (retval == RTE_PMD_TXGBE_MB_EVENT_PROCEED)
+ retval = txgbe_set_vf_lpe(eth_dev, vf, msgbuf);
+ break;
+ case TXGBE_VF_SET_VLAN:
+ if (retval == RTE_PMD_TXGBE_MB_EVENT_PROCEED)
+ retval = txgbe_vf_set_vlan(eth_dev, vf, msgbuf);
+ break;
+ case TXGBE_VF_API_NEGOTIATE:
+ retval = txgbe_negotiate_vf_api(eth_dev, vf, msgbuf);
+ break;
+ case TXGBE_VF_GET_QUEUES:
+ retval = txgbe_get_vf_queues(eth_dev, vf, msgbuf);
+ msg_size = TXGBE_VF_GET_QUEUE_MSG_SIZE;
+ break;
+ case TXGBE_VF_UPDATE_XCAST_MODE:
+ if (retval == RTE_PMD_TXGBE_MB_EVENT_PROCEED)
+ retval = txgbe_set_vf_mc_promisc(eth_dev, vf, msgbuf);
+ break;
+ case TXGBE_VF_SET_MACVLAN:
+ if (retval == RTE_PMD_TXGBE_MB_EVENT_PROCEED)
+ retval = txgbe_set_vf_macvlan_msg(eth_dev, vf, msgbuf);
+ break;
+ default:
+ PMD_DRV_LOG(DEBUG, "Unhandled Msg %8.8x", (uint32_t)msgbuf[0]);
+ retval = TXGBE_ERR_MBX;
+ break;
+ }
+
+ /* respond to the VF according to the message processing result */
+ if (retval)
+ msgbuf[0] |= TXGBE_VT_MSGTYPE_NACK;
+ else
+ msgbuf[0] |= TXGBE_VT_MSGTYPE_ACK;
+
+ msgbuf[0] |= TXGBE_VT_MSGTYPE_CTS;
+
+ txgbe_write_mbx(hw, msgbuf, msg_size, vf);
+
+ return retval;
+}
+
+static inline void
+txgbe_rcv_ack_from_vf(struct rte_eth_dev *eth_dev, uint16_t vf)
+{
+ uint32_t msg = TXGBE_VT_MSGTYPE_NACK;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(eth_dev);
+
+ if (!vfinfo[vf].clear_to_send)
+ txgbe_write_mbx(hw, &msg, 1, vf);
+}
+
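+/*
+ * Poll every VF mailbox for pending resets, requests and acks. Expected
+ * to run from the PF's mailbox interrupt handling path (an assumption
+ * based on the matching ixgbe flow).
+ */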
+void txgbe_pf_mbx_process(struct rte_eth_dev *eth_dev)
+{
+ uint16_t vf;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+
+ for (vf = 0; vf < dev_num_vf(eth_dev); vf++) {
+ /* check & process vf function level reset */
+ if (!txgbe_check_for_rst(hw, vf))
+ txgbe_vf_reset_event(eth_dev, vf);
+
+ /* check & process vf mailbox messages */
+ if (!txgbe_check_for_msg(hw, vf))
+ txgbe_rcv_msg_from_vf(eth_dev, vf);
+
+ /* check & process acks from vf */
+ if (!txgbe_check_for_ack(hw, vf))
+ txgbe_rcv_ack_from_vf(eth_dev, vf);
+ }
+}