DEV_TX_OFFLOAD_TCP_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_QINQ_INSERT |
DEV_TX_OFFLOAD_MULTI_SEGS |
DEV_TX_OFFLOAD_TCP_TSO |
DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GRE_TNL_TSO |
DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
- info->tx_queue_offload_capa);
+ info->tx_queue_offload_capa |
+ hns3_txvlan_cap_get(hw));
info->rx_desc_lim = (struct rte_eth_desc_lim) {
.nb_max = HNS3_MAX_RING_DESC,
return ret;
}
+/*
+ * Report which Tx VLAN-insert offload capabilities are currently usable.
+ * VLAN insert is always supported; QinQ insert is only advertised when no
+ * port based VLAN (PVID) is configured, because the hardware supports at
+ * most two-layer VLAN offload in the Tx direction.
+ */
+static inline uint64_t
+hns3_txvlan_cap_get(struct hns3_hw *hw)
+{
+	if (!hw->port_base_vlan_cfg.state)
+		return DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT;
+
+	return DEV_TX_OFFLOAD_VLAN_INSERT;
+}
+
#endif /* _HNS3_ETHDEV_H_ */
DEV_TX_OFFLOAD_TCP_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_QINQ_INSERT |
DEV_TX_OFFLOAD_MULTI_SEGS |
DEV_TX_OFFLOAD_TCP_TSO |
DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GRE_TNL_TSO |
DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
- info->tx_queue_offload_capa);
+ info->tx_queue_offload_capa |
+ hns3_txvlan_cap_get(hw));
info->rx_desc_lim = (struct rte_eth_desc_lim) {
.nb_max = HNS3_MAX_RING_DESC,
return 0;
}
+/*
+ * Query the current port based VLAN (PVID) state from the PF driver via
+ * mailbox and cache it in hw->port_base_vlan_cfg.state.
+ *
+ * Returns 0 on success. An -ETIME mailbox result is deliberately treated
+ * as success with the state defaulting to disabled, to stay compatible
+ * with older kernel-side PF drivers that do not implement this message
+ * (see the comment in the error path below). Any other mailbox error is
+ * propagated to the caller.
+ */
+static int
+hns3vf_get_port_base_vlan_filter_state(struct hns3_hw *hw)
+{
+	uint8_t resp_msg;
+	int ret;
+
+	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN,
+				HNS3_MBX_GET_PORT_BASE_VLAN_STATE, NULL, 0,
+				true, &resp_msg, sizeof(resp_msg));
+	if (ret) {
+		if (ret == -ETIME) {
+			/*
+			 * Getting current port based VLAN state from PF driver
+			 * will not affect VF driver's basic function. Because
+			 * the VF driver relies on hns3 PF kernel ether driver,
+			 * to avoid introducing compatibility issues with older
+			 * version of PF driver, no failure will be returned
+			 * when the return value is ETIME. This return value has
+			 * the following scenarios:
+			 * 1) Firmware didn't return the results in time
+			 * 2) the result return by firmware is timeout
+			 * 3) the older version of kernel side PF driver does
+			 *    not support this mailbox message.
+			 * For scenarios 1 and 2, it is most likely that a
+			 * hardware error has occurred, or a hardware reset has
+			 * occurred. In this case, these errors will be caught
+			 * by other functions.
+			 */
+			PMD_INIT_LOG(WARNING,
+				"failed to get PVID state for timeout, maybe "
+				"kernel side PF driver doesn't support this "
+				"mailbox message, or firmware didn't respond.");
+			/* Fall back to "PVID disabled" for old PF drivers. */
+			resp_msg = HNS3_PORT_BASE_VLAN_DISABLE;
+		} else {
+			PMD_INIT_LOG(ERR, "failed to get port based VLAN state,"
+				     " ret = %d", ret);
+			return ret;
+		}
+	}
+	/* Any non-zero response byte means PVID is enabled on this VF. */
+	hw->port_base_vlan_cfg.state = resp_msg ?
+		HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;
+	return 0;
+}
static int
hns3vf_get_queue_info(struct hns3_hw *hw)
if (ret)
return ret;
+ ret = hns3vf_get_port_base_vlan_filter_state(hw);
+ if (ret)
+ return ret;
+
/* Get tc configuration from PF */
return hns3vf_get_tc_info(hw);
}
if (ret)
goto err_vlan_table;
+ ret = hns3vf_get_port_base_vlan_filter_state(hw);
+ if (ret)
+ goto err_vlan_table;
+
ret = hns3vf_restore_rx_interrupt(hw);
if (ret)
goto err_vlan_table;
#include "hns3_regs.h"
#include "hns3_logs.h"
#include "hns3_intr.h"
+#include "hns3_rxtx.h"
#define HNS3_CMD_CODE_OFFSET 2
hns3_update_link_status(hw);
}
+/*
+ * Mailbox handler for HNS3_MBX_PUSH_VLAN_INFO: the PF kernel driver pushes
+ * this message when it changes the VF's port based VLAN (PVID) state. Update
+ * the cached state and propagate it to all Tx/Rx queues when it changed.
+ */
+static void
+hns3_update_port_base_vlan_info(struct hns3_hw *hw,
+				struct hns3_mbx_pf_to_vf_cmd *req)
+{
+/* Byte offset of the pushed PVID state within the mailbox message body. */
+#define PVID_STATE_OFFSET	1
+	uint16_t new_pvid_state = req->msg[PVID_STATE_OFFSET] ?
+		HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;
+	/*
+	 * Currently, hardware doesn't support more than two layers VLAN offload
+	 * based on hns3 network engine, which would cause packets loss or wrong
+	 * packets for these types of packets. If the hns3 PF kernel ethdev
+	 * driver sets the PVID for VF device after initialization of the
+	 * related VF device, the PF driver will notify VF driver to update the
+	 * PVID configuration state. The VF driver will update the PVID
+	 * configuration state immediately to ensure that the VLAN process in Tx
+	 * and Rx is correct. But in the window period of this state transition,
+	 * packets loss or packets with wrong VLAN may occur.
+	 */
+	if (hw->port_base_vlan_cfg.state != new_pvid_state) {
+		hw->port_base_vlan_cfg.state = new_pvid_state;
+		hns3_update_all_queues_pvid_state(hw);
+	}
+}
+
static void
hns3_handle_promisc_info(struct hns3_hw *hw, uint16_t promisc_en)
{
case HNS3_MBX_PUSH_LINK_STATUS:
hns3_handle_link_change_event(hw, req);
break;
+ case HNS3_MBX_PUSH_VLAN_INFO:
+ /*
+ * When the PVID configuration status of VF device is
+ * changed by the hns3 PF kernel driver, VF driver will
+ * receive this mailbox message from PF driver.
+ */
+ hns3_update_port_base_vlan_info(hw, req);
+ break;
case HNS3_MBX_PUSH_PROMISC_INFO:
/*
* When the trust status of VF device changed by the
HNS3_MBX_SET_MTU, /* (VF -> PF) set mtu */
HNS3_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */
+ HNS3_MBX_PUSH_VLAN_INFO = 34, /* (PF -> VF) push port base vlan */
+
HNS3_MBX_PUSH_PROMISC_INFO = 36, /* (PF -> VF) push vf promisc info */
HNS3_MBX_HANDLE_VF_TBL = 38, /* (VF -> PF) store/clear hw cfg tbl */
HNS3_MBX_VLAN_FILTER = 0, /* set vlan filter */
HNS3_MBX_VLAN_TX_OFF_CFG, /* set tx side vlan offload */
HNS3_MBX_VLAN_RX_OFF_CFG, /* set rx side vlan offload */
+ HNS3_MBX_GET_PORT_BASE_VLAN_STATE = 4, /* get port based vlan state */
};
enum hns3_mbx_tbl_cfg_subcode {
return 0;
}
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+/*
+ * Debug-build validation of a Tx mbuf against the PVID configuration.
+ * Returns 0 when the packet is acceptable, -EINVAL when hardware would
+ * silently discard it (see the detailed comment below). Only active when
+ * the queue has PVID enabled (txq->pvid_state).
+ */
+static inline int
+hns3_vld_vlan_chk(struct hns3_tx_queue *txq, struct rte_mbuf *m)
+{
+	struct rte_ether_hdr *eh;
+	struct rte_vlan_hdr *vh;
+
+	/* No PVID configured: nothing can conflict, accept the packet. */
+	if (!txq->pvid_state)
+		return 0;
+
+	/*
+	 * Due to hardware limitations, we only support two-layer VLAN hardware
+	 * offload in Tx direction based on hns3 network engine, so when PVID is
+	 * enabled, QinQ insert is no longer supported.
+	 * And when PVID is enabled, in the following two cases:
+	 *  i) packets with more than two VLAN tags.
+	 *  ii) packets with one VLAN tag while the hardware VLAN insert is
+	 *      enabled.
+	 * The packets will be regarded as abnormal packets and discarded by
+	 * hardware in Tx direction. For debugging purposes, a validation check
+	 * for these types of packets is added to the '.tx_pkt_prepare' ops
+	 * implementation function named hns3_prep_pkts to inform users that
+	 * these packets will be discarded.
+	 */
+	if (m->ol_flags & PKT_TX_QINQ_PKT)
+		return -EINVAL;
+
+	eh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+	/*
+	 * NOTE(review): only the 0x8100 (RTE_ETHER_TYPE_VLAN) outer ethertype
+	 * is inspected here; a packet whose outer tag uses 0x88A8
+	 * (RTE_ETHER_TYPE_QINQ) would pass this check — confirm whether that
+	 * case can reach this path and needs the same treatment.
+	 */
+	if (eh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
+		if (m->ol_flags & PKT_TX_VLAN_PKT)
+			return -EINVAL;
+
+		/* Ensure the incoming packet is not a QinQ packet */
+		vh = (struct rte_vlan_hdr *)(eh + 1);
+		if (vh->eth_proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))
+			return -EINVAL;
+	}
+
+	return 0;
+}
+#endif
+
uint16_t
hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
rte_errno = -ret;
return i;
}
+
+ if (hns3_vld_vlan_chk(tx_queue, m)) {
+ rte_errno = EINVAL;
+ return i;
+ }
#endif
ret = rte_net_intel_cksum_prepare(m);
if (ret != 0) {