struct ixgbe_ethertype_filter ethertype_filter;
if (!hw->mac.ops.set_ethertype_anti_spoofing) {
- RTE_LOG(INFO, PMD, "ether type anti-spoofing is not"
- " supported.\n");
+ PMD_DRV_LOG(INFO, "ether type anti-spoofing is not supported.\n");
return;
}
i = ixgbe_ethertype_filter_lookup(filter_info,
IXGBE_ETHERTYPE_FLOW_CTRL);
if (i >= 0) {
- RTE_LOG(ERR, PMD, "A ether type filter"
- " entity for flow control already exists!\n");
+ PMD_DRV_LOG(ERR, "A ether type filter entity for flow control already exists!\n");
return;
}
i = ixgbe_ethertype_filter_insert(filter_info,
ðertype_filter);
if (i < 0) {
- RTE_LOG(ERR, PMD, "Cannot find an unused ether type filter"
- " entity for flow control.\n");
+ PMD_DRV_LOG(ERR, "Cannot find an unused ether type filter entity for flow control.\n");
return;
}
vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
- RTE_LOG(INFO, PMD, "VF %u: disabling multicast promiscuous\n", vf);
+ PMD_DRV_LOG(INFO, "VF %u: disabling multicast promiscuous\n", vf);
vmolr &= ~IXGBE_VMOLR_MPE;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t new_mtu = msgbuf[1];
uint32_t max_frs;
+ uint32_t hlreg0;
int max_frame = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
/* X540 and X550 support jumbo frames in IOV mode */
max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
if (max_frs < new_mtu) {
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ if (new_mtu > RTE_ETHER_MAX_LEN) {
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+ hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+ } else {
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+ hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+
max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
}
break;
}
- RTE_LOG(ERR, PMD, "Negotiate invalid api version %u from VF %d\n",
+ PMD_DRV_LOG(ERR, "Negotiate invalid api version %u from VF %d\n",
api_version, vf);
return -1;
switch (eth_conf->txmode.mq_mode) {
case ETH_MQ_TX_NONE:
case ETH_MQ_TX_DCB:
- RTE_LOG(ERR, PMD, "PF must work with virtualization for VF %u"
+ PMD_DRV_LOG(ERR, "PF must work with virtualization for VF %u"
", but its tx mode = %d\n", vf,
eth_conf->txmode.mq_mode);
return -1;
break;
default:
- RTE_LOG(ERR, PMD, "PF work with invalid mode = %d\n",
+ PMD_DRV_LOG(ERR, "PF work with invalid mode = %d\n",
eth_conf->txmode.mq_mode);
return -1;
}
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
if (!(fctrl & IXGBE_FCTRL_UPE)) {
/* VF promisc requires PF in promisc */
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Enabling VF promisc requires PF in promisc\n");
return -1;
}
return 0;
}
+/*
+ * Handle the IXGBE_VF_SET_MACVLAN mailbox request from a VF.
+ *
+ * msgbuf[0] carries the filter index in its MSGINFO field; msgbuf[1..]
+ * carries the MAC address. A non-zero index adds a perfect-match MAC
+ * filter (RAR entry) for the VF; index 0 clears the VF's extra filter
+ * and resets its count.
+ *
+ * Returns 0 on success, -1 on an invalid MAC address.
+ */
+static int
+ixgbe_set_vf_macvlan_msg(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_vf_info *vf_info =
+		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+	/* MAC address payload starts at the second mailbox word. */
+	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
+	/* Filter index requested by the VF, extracted from MSGINFO bits. */
+	int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
+		    IXGBE_VT_MSGINFO_SHIFT;
+
+	if (index) {
+		/*
+		 * NOTE(review): new_mac points into msgbuf[] and can never
+		 * be NULL here, so this check is effectively dead code.
+		 */
+		if (new_mac == NULL)
+			return -1;
+
+		/* Reject multicast/zero addresses before programming HW. */
+		if (!rte_is_valid_assigned_ether_addr(
+			(struct rte_ether_addr *)new_mac)) {
+			PMD_DRV_LOG(ERR, "set invalid mac vf:%d\n", vf);
+			return -1;
+		}
+
+		vf_info[vf].mac_count++;
+
+		/*
+		 * NOTE(review): mac_count is used directly as the RAR slot
+		 * with no visible upper bound; presumably the RAR table size
+		 * limits this elsewhere — confirm against callers.
+		 */
+		hw->mac.ops.set_rar(hw, vf_info[vf].mac_count,
+				new_mac, vf, IXGBE_RAH_AV);
+	} else {
+		/* index == 0: clear the VF's current RAR entry and reset. */
+		hw->mac.ops.clear_rar(hw, vf_info[vf].mac_count);
+		vf_info[vf].mac_count = 0;
+	}
+	return 0;
+}
+
static int
ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
{
if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
retval = ixgbe_set_vf_mc_promisc(dev, vf, msgbuf);
break;
+ case IXGBE_VF_SET_MACVLAN:
+ if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
+ retval = ixgbe_set_vf_macvlan_msg(dev, vf, msgbuf);
+ break;
default:
PMD_DRV_LOG(DEBUG, "Unhandled Msg %8.8x", (unsigned)msgbuf[0]);
retval = IXGBE_ERR_MBX;