goto error;
#endif
}
+ if (config.devx) {
+ uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)];
+
+ err = mlx5_devx_cmd_register_read
+ (sh->ctx, MLX5_REGISTER_ID_MTUTC, 0,
+ reg, MLX5_ST_SZ_DW(register_mtutc));
+ if (!err) {
+ uint32_t ts_mode;
+
+ /* MTUTC register is read successfully. */
+ ts_mode = MLX5_GET(register_mtutc, reg,
+ time_stamp_mode);
+ if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME)
+ config.rt_timestamp = 1;
+ } else {
+ /* Kernel does not support register reading. */
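+ /* Assume a 1GHz clock (NS_PER_S / MS_PER_S kHz) implies realtime timestamps. */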
+ if (config.hca_attr.dev_freq_khz ==
+ (NS_PER_S / MS_PER_S))
+ config.rt_timestamp = 1;
+ }
+ }
if (config.mprq.enabled && mprq) {
if (config.mprq.stride_num_n &&
(config.mprq.stride_num_n > mprq_max_stride_num_n ||
unsigned int devx:1; /* Whether devx interface is available or not. */
unsigned int dest_tir:1; /* Whether advanced DR API is available. */
unsigned int reclaim_mode:2; /* Memory reclaim mode. */
+ unsigned int rt_timestamp:1; /* Realtime timestamp format. */
struct {
unsigned int enabled:1; /* Whether MPRQ is enabled. */
unsigned int stride_num_n; /* Number of strides. */
priv->drop_queue.hrxq = NULL;
}
}
+
+/**
+ * Set the Rx queue timestamp conversion parameters.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ */
+void
+mlx5_rxq_timestamp_set(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_rxq_data *data;
+ unsigned int i;
+
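+ /* Propagate shared context and realtime flag to each Rx queue. */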
+ for (i = 0; i != priv->rxqs_n; ++i) {
+ if (!(*priv->rxqs)[i])
+ continue;
+ data = (*priv->rxqs)[i];
+ data->sh = sh;
+ data->rt_timestamp = priv->config.rt_timestamp;
+ }
+}
pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
}
if (rxq->hw_timestamp) {
- pkt->timestamp = rte_be_to_cpu_64(cqe->timestamp);
+ uint64_t ts = rte_be_to_cpu_64(cqe->timestamp);
+
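+ /* Convert the CQE timestamp if it is in realtime format. */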
+ if (rxq->rt_timestamp)
+ ts = mlx5_txpp_convert_rx_ts(rxq->sh, ts);
+ pkt->timestamp = ts;
pkt->ol_flags |= PKT_RX_TIMESTAMP;
}
}
struct mlx5_rxq_data {
unsigned int csum:1; /* Enable checksum offloading. */
unsigned int hw_timestamp:1; /* Enable HW timestamp. */
+ unsigned int rt_timestamp:1; /* Realtime timestamp format. */
unsigned int vlan_strip:1; /* Enable VLAN stripping. */
unsigned int crc_present:1; /* CRC must be subtracted. */
unsigned int sges_n:3; /* Log 2 of SGEs (max buffers per packet). */
struct rte_mempool *mp;
struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
struct mlx5_mprq_buf *mprq_repl; /* Stashed mbuf for replenish. */
+ struct mlx5_dev_ctx_shared *sh; /* Shared context. */
uint16_t idx; /* Queue index. */
struct mlx5_rxq_stats stats;
rte_xmm_t mbuf_initializer; /* Default rearm/flags for vectorized Rx. */
void mlx5_hrxq_drop_release(struct rte_eth_dev *dev);
uint64_t mlx5_get_rx_port_offloads(void);
uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);
+void mlx5_rxq_timestamp_set(struct rte_eth_dev *dev);
+
/* mlx5_txq.c */
/* D.5 fill in mbuf - rearm_data and packet_type. */
rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
if (rxq->hw_timestamp) {
- pkts[pos]->timestamp =
- rte_be_to_cpu_64(cq[pos].timestamp);
- pkts[pos + 1]->timestamp =
- rte_be_to_cpu_64(cq[pos + p1].timestamp);
- pkts[pos + 2]->timestamp =
- rte_be_to_cpu_64(cq[pos + p2].timestamp);
- pkts[pos + 3]->timestamp =
- rte_be_to_cpu_64(cq[pos + p3].timestamp);
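+ /* Convert realtime CQE timestamps when enabled. */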
+ if (rxq->rt_timestamp) {
+ struct mlx5_dev_ctx_shared *sh = rxq->sh;
+ uint64_t ts;
+
+ ts = rte_be_to_cpu_64(cq[pos].timestamp);
+ pkts[pos]->timestamp =
+ mlx5_txpp_convert_rx_ts(sh, ts);
+ ts = rte_be_to_cpu_64(cq[pos + p1].timestamp);
+ pkts[pos + 1]->timestamp =
+ mlx5_txpp_convert_rx_ts(sh, ts);
+ ts = rte_be_to_cpu_64(cq[pos + p2].timestamp);
+ pkts[pos + 2]->timestamp =
+ mlx5_txpp_convert_rx_ts(sh, ts);
+ ts = rte_be_to_cpu_64(cq[pos + p3].timestamp);
+ pkts[pos + 3]->timestamp =
+ mlx5_txpp_convert_rx_ts(sh, ts);
+ } else {
+ pkts[pos]->timestamp = rte_be_to_cpu_64
+ (cq[pos].timestamp);
+ pkts[pos + 1]->timestamp = rte_be_to_cpu_64
+ (cq[pos + p1].timestamp);
+ pkts[pos + 2]->timestamp = rte_be_to_cpu_64
+ (cq[pos + p2].timestamp);
+ pkts[pos + 3]->timestamp = rte_be_to_cpu_64
+ (cq[pos + p3].timestamp);
+ }
}
if (rxq->dynf_meta) {
uint64_t flag = rxq->flow_meta_mask;
rxq_cq_to_ptype_oflags_v(rxq, ptype_info, flow_tag,
opcode, &elts[pos]);
if (rxq->hw_timestamp) {
- elts[pos]->timestamp =
- rte_be_to_cpu_64(
- container_of(p0, struct mlx5_cqe,
- pkt_info)->timestamp);
- elts[pos + 1]->timestamp =
- rte_be_to_cpu_64(
- container_of(p1, struct mlx5_cqe,
- pkt_info)->timestamp);
- elts[pos + 2]->timestamp =
- rte_be_to_cpu_64(
- container_of(p2, struct mlx5_cqe,
- pkt_info)->timestamp);
- elts[pos + 3]->timestamp =
- rte_be_to_cpu_64(
- container_of(p3, struct mlx5_cqe,
- pkt_info)->timestamp);
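+ /* Convert realtime CQE timestamps when enabled. */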
+ if (rxq->rt_timestamp) {
+ struct mlx5_dev_ctx_shared *sh = rxq->sh;
+ uint64_t ts;
+
+ ts = rte_be_to_cpu_64
+ (container_of(p0, struct mlx5_cqe,
+ pkt_info)->timestamp);
+ elts[pos]->timestamp =
+ mlx5_txpp_convert_rx_ts(sh, ts);
+ ts = rte_be_to_cpu_64
+ (container_of(p1, struct mlx5_cqe,
+ pkt_info)->timestamp);
+ elts[pos + 1]->timestamp =
+ mlx5_txpp_convert_rx_ts(sh, ts);
+ ts = rte_be_to_cpu_64
+ (container_of(p2, struct mlx5_cqe,
+ pkt_info)->timestamp);
+ elts[pos + 2]->timestamp =
+ mlx5_txpp_convert_rx_ts(sh, ts);
+ ts = rte_be_to_cpu_64
+ (container_of(p3, struct mlx5_cqe,
+ pkt_info)->timestamp);
+ elts[pos + 3]->timestamp =
+ mlx5_txpp_convert_rx_ts(sh, ts);
+ } else {
+ elts[pos]->timestamp = rte_be_to_cpu_64
+ (container_of(p0, struct mlx5_cqe,
+ pkt_info)->timestamp);
+ elts[pos + 1]->timestamp = rte_be_to_cpu_64
+ (container_of(p1, struct mlx5_cqe,
+ pkt_info)->timestamp);
+ elts[pos + 2]->timestamp = rte_be_to_cpu_64
+ (container_of(p2, struct mlx5_cqe,
+ pkt_info)->timestamp);
+ elts[pos + 3]->timestamp = rte_be_to_cpu_64
+ (container_of(p3, struct mlx5_cqe,
+ pkt_info)->timestamp);
+ }
}
if (!!rxq->flow_meta_mask) {
/* This code is subject to further optimization. */
/* D.5 fill in mbuf - rearm_data and packet_type. */
rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
if (rxq->hw_timestamp) {
- pkts[pos]->timestamp =
- rte_be_to_cpu_64(cq[pos].timestamp);
- pkts[pos + 1]->timestamp =
- rte_be_to_cpu_64(cq[pos + p1].timestamp);
- pkts[pos + 2]->timestamp =
- rte_be_to_cpu_64(cq[pos + p2].timestamp);
- pkts[pos + 3]->timestamp =
- rte_be_to_cpu_64(cq[pos + p3].timestamp);
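+ /* Convert realtime CQE timestamps when enabled. */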
+ if (rxq->rt_timestamp) {
+ struct mlx5_dev_ctx_shared *sh = rxq->sh;
+ uint64_t ts;
+
+ ts = rte_be_to_cpu_64(cq[pos].timestamp);
+ pkts[pos]->timestamp =
+ mlx5_txpp_convert_rx_ts(sh, ts);
+ ts = rte_be_to_cpu_64(cq[pos + p1].timestamp);
+ pkts[pos + 1]->timestamp =
+ mlx5_txpp_convert_rx_ts(sh, ts);
+ ts = rte_be_to_cpu_64(cq[pos + p2].timestamp);
+ pkts[pos + 2]->timestamp =
+ mlx5_txpp_convert_rx_ts(sh, ts);
+ ts = rte_be_to_cpu_64(cq[pos + p3].timestamp);
+ pkts[pos + 3]->timestamp =
+ mlx5_txpp_convert_rx_ts(sh, ts);
+ } else {
+ pkts[pos]->timestamp = rte_be_to_cpu_64
+ (cq[pos].timestamp);
+ pkts[pos + 1]->timestamp = rte_be_to_cpu_64
+ (cq[pos + p1].timestamp);
+ pkts[pos + 2]->timestamp = rte_be_to_cpu_64
+ (cq[pos + p2].timestamp);
+ pkts[pos + 3]->timestamp = rte_be_to_cpu_64
+ (cq[pos + p3].timestamp);
+ }
}
if (rxq->dynf_meta) {
/* This code is subject to further optimization. */
dev->data->port_id);
goto error;
}
- /* Set a mask and offset of dynamic metadata flows into Rx queues*/
+ /* Set a mask and offset of dynamic metadata flows into Rx queues. */
mlx5_flow_rxq_dynf_metadata_set(dev);
- /* Set a mask and offset of scheduling on timestamp into Tx queues*/
+ /* Set flags and context to convert Rx timestamps. */
+ mlx5_rxq_timestamp_set(dev);
+ /* Set a mask and offset of scheduling on timestamp into Tx queues. */
mlx5_txq_dynf_timestamp_set(dev);
/*
* In non-cached mode, it only needs to start the default mreg copy