Dump the Rx timestamp value into a dynamic mbuf field via the flex descriptor.
This feature is turned on by the "enable-rx-timestamp" dev config option.
Currently, it is supported only on the scalar path.
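For reference, a minimal application-side sketch of consuming the offload (not
part of this patch; the hwts_* / app_* names are illustrative): enable
RTE_ETH_RX_OFFLOAD_TIMESTAMP at device configure time, register the shared
dynamic field/flag pair, then read the timestamp from each mbuf carrying the
flag.

    #include <rte_mbuf.h>
    #include <rte_mbuf_dyn.h>

    static int hwts_dynfield_offset = -1;
    static uint64_t hwts_dynflag;

    /* Call once after rte_eal_init(): registers the same shared dynamic
     * field/flag pair that the PMD registers in iavf_init_rxq().
     */
    static int
    app_hwts_setup(void)
    {
            return rte_mbuf_dyn_rx_timestamp_register(&hwts_dynfield_offset,
                                                      &hwts_dynflag);
    }

    /* Per received mbuf: return the 64-bit Rx timestamp, or 0 when the
     * descriptor did not carry one.
     */
    static uint64_t
    app_hwts_read(const struct rte_mbuf *mb)
    {
            if (!(mb->ol_flags & hwts_dynflag))
                    return 0;
            return *RTE_MBUF_DYNFIELD(mb, hwts_dynfield_offset,
                                      rte_mbuf_timestamp_t *);
    }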
Signed-off-by: Simei Su <simei.su@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
VLAN offload = Y
L3 checksum offload = P
L4 checksum offload = P
+Timestamp offload = P
Packet type parsing = Y
Rx descriptor status = Y
Tx descriptor status = Y
* Added Tx QoS queue rate limitation support.
* Added quanta size configuration support.
+ * Added ``RTE_ETH_RX_OFFLOAD_TIMESTAMP`` support.
* **Updated Mellanox mlx5 driver.**
struct iavf_tm_conf tm_conf;
struct rte_eth_dev *eth_dev;
+
+ uint32_t ptp_caps;
};
#define IAVF_MAX_PKT_TYPE 1024
bool stopped;
uint16_t fdir_ref_cnt;
struct iavf_devargs devargs;
+ uint64_t phc_time;
};
/* IAVF_DEV_PRIVATE_TO */
uint8_t *msg, size_t msg_len,
uint8_t *resp_msg, size_t resp_msg_len);
extern const struct rte_tm_ops iavf_tm_ops;
+int iavf_get_ptp_cap(struct iavf_adapter *adapter);
+int iavf_get_phc_time(struct iavf_adapter *adapter);
#endif /* _IAVF_ETHDEV_H_ */
#define IAVF_PROTO_XTR_ARG "proto_xtr"
#define IAVF_QUANTA_SIZE_ARG "quanta_size"
+uint64_t iavf_timestamp_dynflag;
+int iavf_timestamp_dynfield_offset = -1;
+
static const char * const iavf_valid_args[] = {
IAVF_PROTO_XTR_ARG,
IAVF_QUANTA_SIZE_ARG,
struct rte_eth_dev_data *dev_data = dev->data;
uint16_t buf_size, max_pkt_len;
uint32_t frame_size = dev->data->mtu + IAVF_ETH_OVERHEAD;
+ int err;
buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
return -EINVAL;
}
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+ /* Register mbuf field and flag for Rx timestamp */
+ err = rte_mbuf_dyn_rx_timestamp_register(
+ &iavf_timestamp_dynfield_offset,
+ &iavf_timestamp_dynflag);
+ if (err) {
+ PMD_DRV_LOG(ERR,
+ "Cannot register mbuf field/flag for timestamp");
+ return -EINVAL;
+ }
+ }
+
rxq->max_pkt_len = max_pkt_len;
if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
rxq->max_pkt_len > buf_size) {
return -1;
}
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP) {
+ if (iavf_get_ptp_cap(adapter)) {
+ PMD_INIT_LOG(ERR, "Failed to get ptp capability");
+ return -1;
+ }
+ }
+
if (iavf_init_queues(dev) != 0) {
PMD_DRV_LOG(ERR, "failed to do Queue init");
return -1;
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP)
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+
if (iavf_ipsec_crypto_supported(adapter)) {
dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SECURITY;
dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
rx_ring = rxq->rx_ring;
ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+ struct iavf_adapter *ad = rxq->vsi->adapter;
+ uint64_t ts_ns;
+
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+ rxq->hw_register_set = 1;
+
while (nb_rx < nb_pkts) {
rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
&rxq->stats.ipsec_crypto);
rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd);
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
+
+ if (iavf_timestamp_dynflag > 0) {
+ if (rxq->hw_register_set)
+ iavf_get_phc_time(ad);
+
+ rxq->hw_register_set = 0;
+ ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+ rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
+
+ *RTE_MBUF_DYNFIELD(rxm,
+ iavf_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = ts_ns;
+ rxm->ol_flags |= iavf_timestamp_dynflag;
+ }
+
rxm->ol_flags |= pkt_flags;
rx_pkts[nb_rx++] = rxm;
uint16_t rx_stat_err0;
uint64_t dma_addr;
uint64_t pkt_flags;
+ struct iavf_adapter *ad = rxq->vsi->adapter;
+ uint64_t ts_ns;
volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
volatile union iavf_rx_flex_desc *rxdp;
const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+ rxq->hw_register_set = 1;
+
while (nb_rx < nb_pkts) {
rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd);
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
+ if (iavf_timestamp_dynflag > 0) {
+ if (rxq->hw_register_set)
+ iavf_get_phc_time(ad);
+
+ rxq->hw_register_set = 0;
+ ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+ rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
+
+ *RTE_MBUF_DYNFIELD(first_seg,
+ iavf_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = ts_ns;
+ first_seg->ol_flags |= iavf_timestamp_dynflag;
+ }
+
first_seg->ol_flags |= pkt_flags;
/* Prefetch data of first segment, if configured to do so. */
int32_t nb_staged = 0;
uint64_t pkt_flags;
const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+ struct iavf_adapter *ad = rxq->vsi->adapter;
+ uint64_t ts_ns;
rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
rxep = &rxq->sw_ring[rxq->rx_tail];
if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
return 0;
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+ rxq->hw_register_set = 1;
+
/* Scan LOOK_AHEAD descriptors at a time to determine which
* descriptors reference packets that are ready to be received.
*/
stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
+ if (iavf_timestamp_dynflag > 0) {
+ if (rxq->hw_register_set)
+ iavf_get_phc_time(ad);
+
+ rxq->hw_register_set = 0;
+ ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+ rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
+
+ *RTE_MBUF_DYNFIELD(mb,
+ iavf_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = ts_ns;
+ mb->ol_flags |= iavf_timestamp_dynflag;
+ }
+
mb->ol_flags |= pkt_flags;
/* Put up to nb_pkts directly into buffers */
#define IAVF_TX_OFFLOAD_NOTSUP_MASK \
(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
+extern uint64_t iavf_timestamp_dynflag;
+extern int iavf_timestamp_dynfield_offset;
+
/**
* Rx Flex Descriptors
* These descriptors are used instead of the legacy version descriptors
/* flexible descriptor metadata extraction offload flag */
struct iavf_rx_queue_stats stats;
uint64_t offloads;
+ uint32_t hw_register_set; /* when set, re-read the PHC time once per Rx burst */
};
struct iavf_tx_entry {
}
}
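+/* Extend the 32-bit Rx timestamp from the flex descriptor to 64 bits, using
+ * the most recently read PHC time as the reference. The wrap direction is
+ * chosen by assuming the descriptor timestamp lies within half of the 32-bit
+ * range of the PHC time's low 32 bits.
+ */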
+static inline
+uint64_t iavf_tstamp_convert_32b_64b(uint64_t time, uint32_t in_timestamp)
+{
+ const uint64_t mask = 0xFFFFFFFF;
+ uint32_t delta;
+ uint64_t ns;
+
+ delta = (in_timestamp - (uint32_t)(time & mask));
+ if (delta > (mask / 2)) {
+ delta = ((uint32_t)(time & mask) - in_timestamp);
+ ns = time - delta;
+ } else {
+ ns = time + delta;
+ }
+
+ return ns;
+}
+
#ifdef RTE_LIBRTE_IAVF_DEBUG_DUMP_DESC
#define IAVF_DUMP_RX_DESC(rxq, desc, rx_id) \
iavf_dump_rx_descriptor(rxq, desc, rx_id)
if (rxq->proto_xtr != IAVF_PROTO_XTR_NONE)
return -1;
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+ return -1;
+
if (rxq->offloads & IAVF_RX_VECTOR_OFFLOAD)
return IAVF_VECTOR_OFFLOAD_PATH;
VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
VIRTCHNL_VF_LARGE_NUM_QPAIRS |
VIRTCHNL_VF_OFFLOAD_QOS |
- VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO;
+ VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO |
+ VIRTCHNL_VF_CAP_PTP;
args.in_args = (uint8_t *)&caps;
args.in_args_size = sizeof(caps);
vc_qp->rxq.crc_disable = rxq[i]->crc_len != 0 ? 1 : 0;
#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
if (vf->vf_res->vf_cap_flags &
- VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
- vf->supported_rxdid & BIT(rxq[i]->rxdid)) {
- vc_qp->rxq.rxdid = rxq[i]->rxdid;
- PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d]",
- vc_qp->rxq.rxdid, i);
- } else {
- PMD_DRV_LOG(NOTICE, "RXDID[%d] is not supported, "
- "request default RXDID[%d] in Queue[%d]",
- rxq[i]->rxdid, IAVF_RXDID_LEGACY_1, i);
- vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_1;
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
+ if (vf->supported_rxdid & BIT(rxq[i]->rxdid)) {
+ vc_qp->rxq.rxdid = rxq[i]->rxdid;
+ PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d]",
+ vc_qp->rxq.rxdid, i);
+ } else {
+ PMD_DRV_LOG(NOTICE, "RXDID[%d] is not supported, "
+ "request default RXDID[%d] in Queue[%d]",
+ rxq[i]->rxdid, IAVF_RXDID_LEGACY_1, i);
+ vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_1;
+ }
+
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP &&
+ vf->ptp_caps & VIRTCHNL_1588_PTP_CAP_RX_TSTAMP)
+ vc_qp->rxq.flags |= VIRTCHNL_PTP_RX_TSTAMP;
}
#else
if (vf->vf_res->vf_cap_flags &
return 0;
}
+
+int
+iavf_get_ptp_cap(struct iavf_adapter *adapter)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_ptp_caps ptp_caps;
+ struct iavf_cmd_info args;
+ int err;
+
+ ptp_caps.caps = VIRTCHNL_1588_PTP_CAP_RX_TSTAMP |
+ VIRTCHNL_1588_PTP_CAP_READ_PHC;
+
+ args.ops = VIRTCHNL_OP_1588_PTP_GET_CAPS;
+ args.in_args = (uint8_t *)&ptp_caps;
+ args.in_args_size = sizeof(ptp_caps);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = IAVF_AQ_BUF_SZ;
+
+ err = iavf_execute_vf_cmd(adapter, &args, 0);
+ if (err) {
+ PMD_DRV_LOG(ERR,
+ "Failed to execute command of OP_1588_PTP_GET_CAPS");
+ return err;
+ }
+
+ vf->ptp_caps = ((struct virtchnl_ptp_caps *)args.out_buffer)->caps;
+
+ return 0;
+}
+
+int
+iavf_get_phc_time(struct iavf_adapter *adapter)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_phc_time phc_time;
+ struct iavf_cmd_info args;
+ int err;
+
+ args.ops = VIRTCHNL_OP_1588_PTP_GET_TIME;
+ args.in_args = (uint8_t *)&phc_time;
+ args.in_args_size = sizeof(phc_time);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = IAVF_AQ_BUF_SZ;
+
+ err = iavf_execute_vf_cmd(adapter, &args, 0);
+ if (err) {
+ PMD_DRV_LOG(ERR,
+ "Failed to execute command of VIRTCHNL_OP_1588_PTP_GET_TIME");
+ return err;
+ }
+
+ adapter->phc_time = ((struct virtchnl_phc_time *)args.out_buffer)->time;
+
+ return 0;
+}