FW version = Y
Registers dump = Y
Module EEPROM dump = Y
+Timesync = Y
+Timestamp offload = Y
Multiprocess aware = Y
Linux = Y
ARMv8 = Y
- MTU update
- NUMA support
- Generic flow API
+- IEEE1588/802.1AS timestamping
Prerequisites
-------------
* Added support for outer UDP checksum in Kunpeng930.
* Added support for query Tx descriptor status.
* Added support for query Rx descriptor status.
+ * Added support for IEEE 1588 PTP.
* **Updated Intel iavf driver.**
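For context on the release-notes entry above, the sketch below shows how an application might consume this PTP support through the generic ethdev timesync API (which the new hns3 dev_ops in this patch implement) and the dynamic mbuf Rx timestamp field. It is an illustrative sketch only, not part of the patch: the helper name ptp_example() and the surrounding port and mbuf setup are assumptions, and it presumes a started hns3 port configured with DEV_RX_OFFLOAD_TIMESTAMP.

/*
 * Illustrative sketch (not part of this patch): exercising the PTP support
 * via the generic ethdev timesync API and the dynamic mbuf timestamp field.
 */
#include <stdio.h>
#include <inttypes.h>
#include <time.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

static int
ptp_example(uint16_t port_id, struct rte_mbuf *rx_pkt)
{
	struct timespec rx_ts, now;
	int ts_off;
	int ret;

	/* Dispatches to the PMD's timesync_enable callback. */
	ret = rte_eth_timesync_enable(port_id);
	if (ret != 0)
		return ret;

	/* Read the current time kept by the NIC hardware clock. */
	ret = rte_eth_timesync_read_time(port_id, &now);
	if (ret != 0)
		return ret;

	/* Nudge the NIC clock forward by 1 ms, as a PTP servo might. */
	ret = rte_eth_timesync_adjust_time(port_id, 1000000);
	if (ret != 0)
		return ret;

	/* Per-packet raw timestamp from the dynamic mbuf field, if registered. */
	ts_off = rte_mbuf_dynfield_lookup(RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
	if (ts_off >= 0 && rx_pkt != NULL)
		printf("raw rx timestamp: %" PRIu64 "\n",
		       *RTE_MBUF_DYNFIELD(rx_pkt, ts_off, rte_mbuf_timestamp_t *));

	/* Timestamp of the last received PTP packet, converted by the PMD
	 * (fails if no PTP packet has been timestamped yet).
	 */
	ret = rte_eth_timesync_read_rx_timestamp(port_id, &rx_ts, 0);
	if (ret != 0)
		return ret;

	return rte_eth_timesync_disable(port_id);
}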
HNS3_OPC_CLEAR_MAC_TNL_INT = 0x0312,
HNS3_OPC_CONFIG_FEC_MODE = 0x031A,
+ /* PTP command */
+ HNS3_OPC_PTP_INT_EN = 0x0501,
+ HNS3_OPC_CFG_PTP_MODE = 0x0507,
+
/* PFC/Pause commands */
HNS3_OPC_CFG_MAC_PAUSE_EN = 0x0701,
HNS3_OPC_CFG_PFC_PAUSE_EN = 0x0702,
uint32_t rev1[2];
};
+#define HNS3_PTP_ENABLE_B 0
+#define HNS3_PTP_TX_ENABLE_B 1
+#define HNS3_PTP_RX_ENABLE_B 2
+
+#define HNS3_PTP_TYPE_S 0
+#define HNS3_PTP_TYPE_M (0x3 << HNS3_PTP_TYPE_S)
+
+#define ALL_PTP_V2_TYPE 0xF
+#define HNS3_PTP_MESSAGE_TYPE_S 0
+#define HNS3_PTP_MESSAGE_TYPE_M (0xF << HNS3_PTP_MESSAGE_TYPE_S)
+
+#define PTP_TYPE_L2_V2_TYPE 0
+
+struct hns3_ptp_mode_cfg_cmd {
+ uint8_t enable;
+ uint8_t ptp_type;
+ uint8_t v2_message_type_1;
+ uint8_t v2_message_type_0;
+ uint8_t rsv[20];
+};
+
+struct hns3_ptp_int_cmd {
+ uint8_t int_en;
+ uint8_t rsvd[23];
+};
+
#define HNS3_MAX_TQP_NUM_HIP08_PF 64
#define HNS3_DEFAULT_TX_BUF 0x4000 /* 16k bytes */
#define HNS3_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */
HNS3_VECTOR0_EVENT_RST,
HNS3_VECTOR0_EVENT_MBX,
HNS3_VECTOR0_EVENT_ERR,
+ HNS3_VECTOR0_EVENT_PTP,
HNS3_VECTOR0_EVENT_OTHER,
};
goto out;
}
+ /* Check for vector0 1588 event source */
+ if (BIT(HNS3_VECTOR0_1588_INT_B) & vector0_int_stats) {
+ val = BIT(HNS3_VECTOR0_1588_INT_B);
+ ret = HNS3_VECTOR0_EVENT_PTP;
+ goto out;
+ }
+
/* check for vector0 msix event source */
if (vector0_int_stats & HNS3_VECTOR0_REG_MSIX_MASK ||
hw_err_src_reg & HNS3_RAS_REG_NFE_MASK) {
return ret;
}
+static bool
+hns3_is_1588_event_type(uint32_t event_type)
+{
+ return (event_type == HNS3_VECTOR0_EVENT_PTP);
+}
+
static void
hns3_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr)
{
- if (event_type == HNS3_VECTOR0_EVENT_RST)
+ if (event_type == HNS3_VECTOR0_EVENT_RST ||
+ hns3_is_1588_event_type(event_type))
hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, regclr);
else if (event_type == HNS3_VECTOR0_EVENT_MBX)
hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) |
BIT(HNS3_VECTOR0_CORERESET_INT_B));
hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_MBX, 0);
+ hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_PTP,
+ BIT(HNS3_VECTOR0_1588_INT_B));
}
static void
if (ret)
goto cfg_err;
+ ret = hns3_mbuf_dyn_rx_timestamp_register(dev, conf);
+ if (ret)
+ goto cfg_err;
+
ret = hns3_dev_configure_vlan(dev);
if (ret)
goto cfg_err;
info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+ if (hns3_dev_ptp_supported(hw))
+ info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+
info->rx_desc_lim = (struct rte_eth_desc_lim) {
.nb_max = HNS3_MAX_RING_DESC,
.nb_min = HNS3_MIN_RING_DESC,
goto err_intr_callback_register;
}
+ ret = hns3_ptp_init(hw);
+ if (ret)
+ goto err_get_config;
+
/* Enable interrupt */
rte_intr_enable(&pci_dev->intr_handle);
hns3_pf_enable_irq0(hw);
if (ret)
goto err_promisc;
+ ret = hns3_restore_ptp(hns);
+ if (ret)
+ goto err_promisc;
+
ret = hns3_restore_rx_interrupt(hw);
if (ret)
goto err_promisc;
.fec_set = hns3_fec_set,
.tm_ops_get = hns3_tm_ops_get,
.tx_done_cleanup = hns3_tx_done_cleanup,
+ .timesync_enable = hns3_timesync_enable,
+ .timesync_disable = hns3_timesync_disable,
+ .timesync_read_rx_timestamp = hns3_timesync_read_rx_timestamp,
+ .timesync_read_tx_timestamp = hns3_timesync_read_tx_timestamp,
+ .timesync_adjust_time = hns3_timesync_adjust_time,
+ .timesync_read_time = hns3_timesync_read_time,
+ .timesync_write_time = hns3_timesync_write_time,
};
static const struct hns3_reset_ops hns3_reset_ops = {
bool support_sfp_query;
uint32_t fec_mode; /* current FEC mode for ethdev */
+ bool ptp_enable;
+
+ /* Timestamp of the last received PTP packet on this device */
+ uint64_t rx_timestamp;
+
struct hns3_vtag_cfg vtag_config;
LIST_HEAD(vlan_tbl, hns3_user_vlan_table) vlan_list;
void hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status,
uint32_t link_speed, uint8_t link_duplex);
void hns3_parse_devargs(struct rte_eth_dev *dev);
+int hns3_restore_ptp(struct hns3_adapter *hns);
+int hns3_mbuf_dyn_rx_timestamp_register(struct rte_eth_dev *dev,
+ struct rte_eth_conf *conf);
+int hns3_ptp_init(struct hns3_hw *hw);
+int hns3_timesync_enable(struct rte_eth_dev *dev);
+int hns3_timesync_disable(struct rte_eth_dev *dev);
+int hns3_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp,
+ uint32_t flags __rte_unused);
+int hns3_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp);
+int hns3_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts);
+int hns3_timesync_write_time(struct rte_eth_dev *dev,
+ const struct timespec *ts);
+int hns3_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
static inline bool
is_reset_pending(struct hns3_adapter *hns)
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021-2021 Hisilicon Limited.
+ */
+
+#include <ethdev_pci.h>
+#include <rte_io.h>
+#include <rte_time.h>
+
+#include "hns3_ethdev.h"
+#include "hns3_regs.h"
+#include "hns3_logs.h"
+
+uint64_t hns3_timestamp_rx_dynflag;
+int hns3_timestamp_dynfield_offset = -1;
+
+int
+hns3_mbuf_dyn_rx_timestamp_register(struct rte_eth_dev *dev,
+ struct rte_eth_conf *conf)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+ return 0;
+
+ ret = rte_mbuf_dyn_rx_timestamp_register
+ (&hns3_timestamp_dynfield_offset,
+ &hns3_timestamp_rx_dynflag);
+ if (ret) {
+ hns3_err(hw,
+ "failed to register Rx timestamp field/flag");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+hns3_ptp_int_en(struct hns3_hw *hw, bool en)
+{
+ struct hns3_ptp_int_cmd *req;
+ struct hns3_cmd_desc desc;
+ int ret;
+
+ req = (struct hns3_ptp_int_cmd *)desc.data;
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PTP_INT_EN, false);
+ req->int_en = en ? 1 : 0;
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret)
+ hns3_err(hw, "failed to %s PTP interrupt, ret = %d",
+ en ? "enable" : "disable", ret);
+
+ return ret;
+}
+
+int
+hns3_ptp_init(struct hns3_hw *hw)
+{
+ int ret;
+
+ if (!hns3_dev_ptp_supported(hw))
+ return 0;
+
+ ret = hns3_ptp_int_en(hw, true);
+ if (ret)
+ return ret;
+
+ /* Start PTP timer */
+ hns3_write_dev(hw, HNS3_CFG_TIME_CYC_EN, 1);
+
+ return 0;
+}
+
+static int
+hns3_timesync_configure(struct hns3_adapter *hns, bool en)
+{
+ struct hns3_ptp_mode_cfg_cmd *req;
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_pf *pf = &hns->pf;
+ struct hns3_cmd_desc desc;
+ int val;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PTP_MODE, false);
+
+ req = (struct hns3_ptp_mode_cfg_cmd *)desc.data;
+
+ val = en ? 1 : 0;
+ hns3_set_bit(req->enable, HNS3_PTP_ENABLE_B, val);
+ hns3_set_bit(req->enable, HNS3_PTP_TX_ENABLE_B, val);
+ hns3_set_bit(req->enable, HNS3_PTP_RX_ENABLE_B, val);
+
+ if (en) {
+ hns3_set_field(req->ptp_type, HNS3_PTP_TYPE_M, HNS3_PTP_TYPE_S,
+ PTP_TYPE_L2_V2_TYPE);
+ hns3_set_field(req->v2_message_type_1, HNS3_PTP_MESSAGE_TYPE_M,
+ HNS3_PTP_MESSAGE_TYPE_S, ALL_PTP_V2_TYPE);
+ }
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret) {
+ hns3_err(hw, "configure PTP time failed, en = %d, ret = %d",
+ en, ret);
+ return ret;
+ }
+
+ pf->ptp_enable = en;
+
+ return 0;
+}
+
+int
+hns3_timesync_enable(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_pf *pf = &hns->pf;
+ int ret;
+
+ if (!hns3_dev_ptp_supported(hw))
+ return -ENOTSUP;
+
+ if (pf->ptp_enable)
+ return 0;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_timesync_configure(hns, true);
+ rte_spinlock_unlock(&hw->lock);
+ return ret;
+}
+
+int
+hns3_timesync_disable(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_pf *pf = &hns->pf;
+ int ret;
+
+ if (!hns3_dev_ptp_supported(hw))
+ return -ENOTSUP;
+
+ if (!pf->ptp_enable)
+ return 0;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_timesync_configure(hns, false);
+ rte_spinlock_unlock(&hw->lock);
+
+ return ret;
+}
+
+int
+hns3_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp,
+ uint32_t flags __rte_unused)
+{
+#define TIME_RX_STAMP_NS_MASK 0x3FFFFFFF
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_pf *pf = &hns->pf;
+ uint64_t ns, sec;
+
+ if (!hns3_dev_ptp_supported(hw))
+ return -ENOTSUP;
+
+ ns = pf->rx_timestamp & TIME_RX_STAMP_NS_MASK;
+ sec = upper_32_bits(pf->rx_timestamp);
+
+ ns += sec * NSEC_PER_SEC;
+ *timestamp = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+int
+hns3_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp)
+{
+#define TIME_TX_STAMP_NS_MASK 0x3FFFFFFF
+#define TIME_TX_STAMP_VALID 24
+#define TIME_TX_STAMP_CNT_MASK 0x7
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ uint64_t sec;
+ uint64_t tmp;
+ uint64_t ns;
+ int ts_cnt;
+
+ if (!hns3_dev_ptp_supported(hw))
+ return -ENOTSUP;
+
+ ts_cnt = hns3_read_dev(hw, HNS3_TX_1588_BACK_TSP_CNT) &
+ TIME_TX_STAMP_CNT_MASK;
+ if (ts_cnt == 0)
+ return -EINVAL;
+
+ ns = hns3_read_dev(hw, HNS3_TX_1588_TSP_BACK_0) & TIME_TX_STAMP_NS_MASK;
+ sec = hns3_read_dev(hw, HNS3_TX_1588_TSP_BACK_1);
+ tmp = hns3_read_dev(hw, HNS3_TX_1588_TSP_BACK_2) & 0xFFFF;
+ sec = (tmp << 32) | sec;
+
+ ns += sec * NSEC_PER_SEC;
+
+ *timestamp = rte_ns_to_timespec(ns);
+
+ /* Read to clear the Tx timestamp currently stored in hardware */
+ hns3_read_dev(hw, HNS3_TX_1588_SEQID_BACK);
+
+ return 0;
+}
+
+int
+hns3_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t ns, sec;
+
+ if (!hns3_dev_ptp_supported(hw))
+ return -ENOTSUP;
+
+ sec = hns3_read_dev(hw, HNS3_CURR_TIME_OUT_L);
+ sec |= (uint64_t)(hns3_read_dev(hw, HNS3_CURR_TIME_OUT_H) & 0xFFFF)
+ << 32;
+
+ ns = hns3_read_dev(hw, HNS3_CURR_TIME_OUT_NS);
+ ns += sec * NSEC_PER_SEC;
+ *ts = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+int
+hns3_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t sec = ts->tv_sec;
+ uint64_t ns = ts->tv_nsec;
+
+ if (!hns3_dev_ptp_supported(hw))
+ return -ENOTSUP;
+
+ /* Set the timecounters to a new value. */
+ hns3_write_dev(hw, HNS3_CFG_TIME_SYNC_H, upper_32_bits(sec));
+ hns3_write_dev(hw, HNS3_CFG_TIME_SYNC_M, lower_32_bits(sec));
+ hns3_write_dev(hw, HNS3_CFG_TIME_SYNC_L, lower_32_bits(ns));
+ hns3_write_dev(hw, HNS3_CFG_TIME_SYNC_RDY, 1);
+
+ return 0;
+}
+
+int
+hns3_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+#define TIME_SYNC_L_MASK 0x7FFFFFFF
+#define SYMBOL_BIT_OFFSET 31
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct timespec cur_time;
+ uint64_t ns;
+
+ if (!hns3_dev_ptp_supported(hw))
+ return -ENOTSUP;
+
+ (void)hns3_timesync_read_time(dev, &cur_time);
+ ns = rte_timespec_to_ns((const struct timespec *)&cur_time);
+ cur_time = rte_ns_to_timespec(ns + delta);
+ (void)hns3_timesync_write_time(dev, (const struct timespec *)&cur_time);
+
+ return 0;
+}
+
+int
+hns3_restore_ptp(struct hns3_adapter *hns)
+{
+ struct hns3_pf *pf = &hns->pf;
+ struct hns3_hw *hw = &hns->hw;
+ bool en = pf->ptp_enable;
+ int ret;
+
+ if (!hns3_dev_ptp_supported(hw))
+ return 0;
+
+ ret = hns3_timesync_configure(hns, en);
+ if (ret)
+ hns3_err(hw, "restore PTP enable state(%d) failed, ret = %d",
+ en, ret);
+
+ return ret;
+}
#define HNS3_TQP_INTR_RL_DEFAULT 0
#define HNS3_TQP_INTR_QL_DEFAULT 0
+/* Register bit for 1588 event */
+#define HNS3_VECTOR0_1588_INT_B 0
+
+#define HNS3_PTP_BASE_ADDRESS 0x29000
+
+#define HNS3_TX_1588_SEQID_BACK (HNS3_PTP_BASE_ADDRESS + 0x0)
+#define HNS3_TX_1588_TSP_BACK_0 (HNS3_PTP_BASE_ADDRESS + 0x4)
+#define HNS3_TX_1588_TSP_BACK_1 (HNS3_PTP_BASE_ADDRESS + 0x8)
+#define HNS3_TX_1588_TSP_BACK_2 (HNS3_PTP_BASE_ADDRESS + 0xc)
+
+#define HNS3_TX_1588_BACK_TSP_CNT (HNS3_PTP_BASE_ADDRESS + 0x30)
+
+#define HNS3_CFG_TIME_SYNC_H (HNS3_PTP_BASE_ADDRESS + 0x50)
+#define HNS3_CFG_TIME_SYNC_M (HNS3_PTP_BASE_ADDRESS + 0x54)
+#define HNS3_CFG_TIME_SYNC_L (HNS3_PTP_BASE_ADDRESS + 0x58)
+#define HNS3_CFG_TIME_SYNC_RDY (HNS3_PTP_BASE_ADDRESS + 0x5c)
+
+#define HNS3_CFG_TIME_CYC_EN (HNS3_PTP_BASE_ADDRESS + 0x70)
+
+#define HNS3_CURR_TIME_OUT_H (HNS3_PTP_BASE_ADDRESS + 0x74)
+#define HNS3_CURR_TIME_OUT_L (HNS3_PTP_BASE_ADDRESS + 0x78)
+#define HNS3_CURR_TIME_OUT_NS (HNS3_PTP_BASE_ADDRESS + 0x7c)
+
/* gl_usec convert to hardware count, as writing each 1 represents 2us */
#define HNS3_GL_USEC_TO_REG(gl_usec) ((gl_usec) >> 1)
/* rl_usec convert to hardware count, as writing each 1 represents 4us */
return rte_mbuf_raw_alloc(rxq->mb_pool);
}
+static inline void
+hns3_rx_ptp_timestamp_handle(struct hns3_rx_queue *rxq, struct rte_mbuf *mbuf,
+ volatile struct hns3_desc *rxd)
+{
+ struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(rxq->hns);
+ uint64_t timestamp = rte_le_to_cpu_64(rxd->timestamp);
+
+ mbuf->ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST;
+ if (hns3_timestamp_rx_dynflag > 0) {
+ *RTE_MBUF_DYNFIELD(mbuf, hns3_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = timestamp;
+ mbuf->ol_flags |= hns3_timestamp_rx_dynflag;
+ }
+
+ pf->rx_timestamp = timestamp;
+}
+
uint16_t
hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
}
rxm = rxe->mbuf;
+ rxm->ol_flags = 0;
rxe->mbuf = nmb;
+ if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B)))
+ hns3_rx_ptp_timestamp_handle(rxq, rxm, rxdp);
+
dma_addr = rte_mbuf_data_iova_default(nmb);
rxdp->addr = rte_cpu_to_le_64(dma_addr);
rxdp->rx.bd_base_info = 0;
rxm->data_len = rxm->pkt_len;
rxm->port = rxq->port_id;
rxm->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
- rxm->ol_flags = PKT_RX_RSS_HASH;
+ rxm->ol_flags |= PKT_RX_RSS_HASH;
if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {
rxm->hash.fdir.hi =
rte_le_to_cpu_16(rxd.rx.fd_id);
rxm->packet_type = hns3_rx_calc_ptype(rxq, l234_info, ol_info);
+ if (rxm->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC)
+ rxm->ol_flags |= PKT_RX_IEEE1588_PTP;
+
if (likely(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
hns3_rx_set_cksum_flag(rxm, rxm->packet_type,
cksum_err);
{
desc->addr = rte_mbuf_data_iova(rxm);
desc->tx.send_size = rte_cpu_to_le_16(rte_pktmbuf_data_len(rxm));
- desc->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B));
+ desc->tx.tp_fe_sc_vld_ra_ri |= rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B));
}
static void
rte_cpu_to_le_32(BIT(HNS3_TXD_VLAN_B));
desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci);
}
+
+ if (ol_flags & PKT_TX_IEEE1588_TMST)
+ desc->tx.tp_fe_sc_vld_ra_ri |=
+ rte_cpu_to_le_16(BIT(HNS3_TXD_TSYN_B));
}
static inline int
return 0;
}
+static bool
+hns3_tx_check_simple_support(struct rte_eth_dev *dev)
+{
+ uint64_t offloads = dev->data->dev_conf.txmode.offloads;
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hns3_dev_ptp_supported(hw))
+ return false;
+
+ return (offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE));
+}
+
static eth_tx_burst_t
hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep)
{
- uint64_t offloads = dev->data->dev_conf.txmode.offloads;
struct hns3_adapter *hns = dev->data->dev_private;
bool vec_allowed, sve_allowed, simple_allowed;
hns3_tx_check_vec_support(dev) == 0;
sve_allowed = vec_allowed && hns3_check_sve_support();
simple_allowed = hns->tx_simple_allowed &&
- offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
+ hns3_tx_check_simple_support(dev);
*prep = NULL;
#define HNS3_RXD_L3L4P_B 11
#define HNS3_RXD_TSIND_S 12
#define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S)
+
+#define HNS3_RXD_TS_VLD_B 14
#define HNS3_RXD_LKBK_B 15
#define HNS3_RXD_GRO_SIZE_S 16
#define HNS3_RXD_GRO_SIZE_M (0x3fff << HNS3_RXD_GRO_SIZE_S)
struct hns3_desc {
union {
uint64_t addr;
+ uint64_t timestamp;
+
struct {
uint32_t addr0;
uint32_t addr1;
HNS3_OUTER_L4_CKSUM_ERR = 8
};
+extern uint64_t hns3_timestamp_rx_dynflag;
+extern int hns3_timestamp_dynfield_offset;
+
static inline int
hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
uint32_t bd_base_info, uint32_t l234_info,
{
struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hns3_dev_ptp_supported(hw))
+ return -ENOTSUP;
+
/* Only support DEV_TX_OFFLOAD_MBUF_FAST_FREE */
if (txmode->offloads != DEV_TX_OFFLOAD_MBUF_FAST_FREE)
return -ENOTSUP;
memset(rxq->offset_table, 0, sizeof(rxq->offset_table));
}
-#ifndef RTE_LIBRTE_IEEE1588
static int
hns3_rxq_vec_check(struct hns3_rx_queue *rxq, void *arg)
{
RTE_SET_USED(arg);
return 0;
}
-#endif
int
hns3_rx_check_vec_support(struct rte_eth_dev *dev)
{
-#ifndef RTE_LIBRTE_IEEE1588
struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
uint64_t offloads_mask = DEV_RX_OFFLOAD_TCP_LRO |
DEV_RX_OFFLOAD_VLAN;
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hns3_dev_ptp_supported(hw))
+ return -ENOTSUP;
+
if (dev->data->scattered_rx)
return -ENOTSUP;
return -ENOTSUP;
return 0;
-#else
- RTE_SET_USED(dev);
- return -ENOTSUP;
-#endif
}
'hns3_rxtx.c',
'hns3_stats.c',
'hns3_mp.c',
- 'hns3_tm.c')
+ 'hns3_tm.c',
+ 'hns3_ptp.c')
deps += ['hash']