#include "base/ice_flow.h"
#include "base/ice_dcb.h"
#include "base/ice_common.h"
+#include "base/ice_ptp_hw.h"
#include "rte_pmd_ice.h"
#include "ice_ethdev.h"
#define ICE_ONE_PPS_OUT_ARG "pps_out"
#define ICE_RX_LOW_LATENCY_ARG "rx_low_latency"
+#define ICE_CYCLECOUNTER_MASK 0xffffffffffffffffULL
+
uint64_t ice_timestamp_dynflag;
int ice_timestamp_dynfield_offset = -1;
NULL
};
-#define NSEC_PER_SEC 1000000000
#define PPS_OUT_DELAY_NS 1
static const struct rte_mbuf_dynfield ice_proto_xtr_metadata_param = {
struct rte_eth_udp_tunnel *udp_tunnel);
static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
+static int ice_timesync_enable(struct rte_eth_dev *dev);
+static int ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp,
+ uint32_t flags);
+static int ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp);
+static int ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
+static int ice_timesync_read_time(struct rte_eth_dev *dev,
+ struct timespec *timestamp);
+static int ice_timesync_write_time(struct rte_eth_dev *dev,
+ const struct timespec *timestamp);
+static int ice_timesync_disable(struct rte_eth_dev *dev);
static const struct rte_pci_id pci_id_ice_map[] = {
{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
.udp_tunnel_port_del = ice_dev_udp_tunnel_port_del,
.tx_done_cleanup = ice_tx_done_cleanup,
.get_monitor_addr = ice_get_monitor_addr,
+ .timesync_enable = ice_timesync_enable,
+ .timesync_read_rx_timestamp = ice_timesync_read_rx_timestamp,
+ .timesync_read_tx_timestamp = ice_timesync_read_tx_timestamp,
+ .timesync_adjust_time = ice_timesync_adjust_time,
+ .timesync_read_time = ice_timesync_read_time,
+ .timesync_write_time = ice_timesync_write_time,
+ .timesync_disable = ice_timesync_disable,
};
/* store statistics names and their offsets in the stats structure */
return ret;
}
+static int
+ice_timesync_enable(struct rte_eth_dev *dev)
+{
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ int ret;
+
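+ /* If the port is already started, the Rx timestamp offload must
+  * have been enabled at configure time.
+  */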
+ if (dev->data->dev_started && !(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_TIMESTAMP)) {
+ PMD_DRV_LOG(ERR, "Rx timestamp offload not configured");
+ return -1;
+ }
+
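+ /* Only the function that owns the source timer initializes the
+  * PHC and programs the nominal increment value.
+  */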
+ if (hw->func_caps.ts_func_info.src_tmr_owned) {
+ ret = ice_ptp_init_phc(hw);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to initialize PHC");
+ return -1;
+ }
+
+ ret = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed to write PHC increment time value");
+ return -1;
+ }
+ }
+
+ /* Initialize cycle counters for system time and Rx/Tx timestamps */
+ memset(&ad->systime_tc, 0, sizeof(struct rte_timecounter));
+ memset(&ad->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+ memset(&ad->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+ ad->systime_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+ ad->systime_tc.cc_shift = 0;
+ ad->systime_tc.nsec_mask = 0;
+
+ ad->rx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+ ad->rx_tstamp_tc.cc_shift = 0;
+ ad->rx_tstamp_tc.nsec_mask = 0;
+
+ ad->tx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+ ad->tx_tstamp_tc.cc_shift = 0;
+ ad->tx_tstamp_tc.nsec_mask = 0;
+
+ ad->ptp_ena = 1;
+
+ return 0;
+}
+
+static int
+ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp, uint32_t flags)
+{
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct ice_rx_queue *rxq;
+ uint32_t ts_high;
+ uint64_t ts_ns, ns;
+
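+ /* The 'flags' argument carries the Rx queue index that was stored
+  * in mbuf->timesync when the PTP packet was received.
+  */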
+ rxq = dev->data->rx_queues[flags];
+
+ ts_high = rxq->time_high;
+ ts_ns = ice_tstamp_convert_32b_64b(hw, ts_high);
+ ns = rte_timecounter_update(&ad->rx_tstamp_tc, ts_ns);
+ *timestamp = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp)
+{
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ uint8_t lport;
+ uint64_t ts_ns, ns, tstamp;
+ const uint64_t mask = 0xFFFFFFFF;
+ int ret;
+
+ lport = hw->port_info->lport;
+
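+ /* Read the Tx timestamp captured in index 0 of this port's PHY
+  * timestamp block.
+  */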
+ ret = ice_read_phy_tstamp(hw, lport, 0, &tstamp);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to read phy timestamp");
+ return -1;
+ }
+
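+ /* Drop the low 8 bits of the 40-bit PHY timestamp and extend the
+  * remaining 32 bits to a 64-bit nanosecond value.
+  */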
+ ts_ns = ice_tstamp_convert_32b_64b(hw, (tstamp >> 8) & mask);
+ ns = rte_timecounter_update(&ad->tx_tstamp_tc, ts_ns);
+ *timestamp = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
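+ /* Only the software timecounters are adjusted; the hardware
+  * clock keeps running undisturbed.
+  */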
+ ad->systime_tc.nsec += delta;
+ ad->rx_tstamp_tc.nsec += delta;
+ ad->tx_tstamp_tc.nsec += delta;
+
+ return 0;
+}
+
+static int
+ice_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ uint64_t ns;
+
+ ns = rte_timespec_to_ns(ts);
+
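+ /* Seed the software timecounters with the new time; the hardware
+  * registers are left untouched.
+  */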
+ ad->systime_tc.nsec = ns;
+ ad->rx_tstamp_tc.nsec = ns;
+ ad->tx_tstamp_tc.nsec = ns;
+
+ return 0;
+}
+
+static int
+ice_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ uint32_t hi, lo, lo2;
+ uint64_t time, ns;
+
+ lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+ hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
+ lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+
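+ /* If TIME_L wrapped between the two reads, TIME_H may not match
+  * it; re-read the pair.
+  */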
+ if (lo2 < lo) {
+ lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+ hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
+ }
+
+ time = ((uint64_t)hi << 32) | lo;
+ ns = rte_timecounter_update(&ad->systime_tc, time);
+ *ts = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+ice_timesync_disable(struct rte_eth_dev *dev)
+{
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ uint64_t val;
+ uint8_t lport;
+
+ lport = hw->port_info->lport;
+
+ ice_clear_phy_tstamp(hw, lport, 0);
+
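+ /* Disable timestamping on source timer 0 and zero its increment
+  * value.
+  */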
+ val = ICE_READ_REG(hw, GLTSYN_ENA(0));
+ val &= ~GLTSYN_ENA_TSYN_ENA_M;
+ ICE_WRITE_REG(hw, GLTSYN_ENA(0), val);
+
+ ICE_WRITE_REG(hw, GLTSYN_INCVAL_L(0), 0);
+ ICE_WRITE_REG(hw, GLTSYN_INCVAL_H(0), 0);
+
+ ad->ptp_ena = 0;
+
+ return 0;
+}
+
static int
ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
uint32_t rxdid = ICE_RXDID_COMMS_OVS;
uint32_t regval;
+ struct ice_adapter *ad = rxq->vsi->adapter;
/* Set buffer size as the head split is disabled. */
buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
QRXFLXP_CNTXT_RXDID_PRIO_M;
- if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+ if (ad->ptp_ena || (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP))
regval |= QRXFLXP_CNTXT_TS_M;
ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
tx_ctx.tso_ena = 1; /* tso enable */
tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
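+ /* Enable Tx timestamping for packets sent on this queue. */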
+ tx_ctx.tsyn_ena = 1;
ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
ice_tlan_ctx_info);
struct ice_vsi *vsi = rxq->vsi;
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
uint64_t ts_ns;
+ struct ice_adapter *ad = rxq->vsi->adapter;
rxdp = &rxq->rx_ring[rxq->rx_tail];
rxep = &rxq->sw_ring[rxq->rx_tail];
}
}
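+ /* For IEEE 1588 packets, latch the high 32 bits of the Rx
+  * timestamp and record the queue index in mbuf->timesync so
+  * timesync_read_rx_timestamp() can find it.
+  */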
+ if (ad->ptp_ena && ((mb->packet_type &
+ RTE_PTYPE_L2_MASK) == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+ rxq->time_high =
+ rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
+ mb->timesync = rxq->queue_id;
+ pkt_flags |= PKT_RX_IEEE1588_PTP;
+ }
+
mb->ol_flags |= pkt_flags;
}
struct ice_vsi *vsi = rxq->vsi;
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
uint64_t ts_ns;
+ struct ice_adapter *ad = rxq->vsi->adapter;
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
}
}
+ if (ad->ptp_ena && ((first_seg->packet_type & RTE_PTYPE_L2_MASK)
+ == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+ rxq->time_high =
+ rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
+ first_seg->timesync = rxq->queue_id;
+ pkt_flags |= PKT_RX_IEEE1588_PTP;
+ }
+
first_seg->ol_flags |= pkt_flags;
/* Prefetch data of first segment, if configured to do so. */
rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
struct ice_vsi *vsi = rxq->vsi;
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
uint64_t ts_ns;
+ struct ice_adapter *ad = rxq->vsi->adapter;
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
}
}
+ if (ad->ptp_ena && ((rxm->packet_type & RTE_PTYPE_L2_MASK) ==
+ RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+ rxq->time_high =
+ rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
+ rxm->timesync = rxq->queue_id;
+ pkt_flags |= PKT_RX_IEEE1588_PTP;
+ }
+
rxm->ol_flags |= pkt_flags;
/* copy old mbuf to rx_pkts */
rx_pkts[nb_rx++] = rxm;
static uint64_t mask = PKT_TX_TCP_SEG |
PKT_TX_QINQ |
PKT_TX_OUTER_IP_CKSUM |
- PKT_TX_TUNNEL_MASK;
+ PKT_TX_TUNNEL_MASK |
+ PKT_TX_IEEE1588_TMST;
return (flags & mask) ? 1 : 0;
}
if (ol_flags & PKT_TX_TCP_SEG)
cd_type_cmd_tso_mss |=
ice_set_tso_ctx(tx_pkt, tx_offload);
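+ /* Request a Tx timestamp via the context descriptor; TSO and
+  * Tx timestamping are mutually exclusive here.
+  */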
+ else if (ol_flags & PKT_TX_IEEE1588_TMST)
+ cd_type_cmd_tso_mss |=
+ ((uint64_t)ICE_TX_CTX_DESC_TSYN <<
+ ICE_TXD_CTX_QW1_CMD_S);
ctx_txd->tunneling_params =
rte_cpu_to_le_32(cd_tunneling_params);
ad->rx_use_avx512 = false;
ad->rx_use_avx2 = false;
rx_check_ret = ice_rx_vec_dev_check(dev);
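+ /* The PTP timestamp handling lives only in the scalar Rx paths,
+  * so force the scalar path while PTP is enabled.
+  */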
+ if (ad->ptp_ena)
+ rx_check_ret = -1;
if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
ad->rx_vec_allowed = true;