	volatile uint32_t *cq_db; /* Completion queue doorbell. */
	uint16_t port_id; /* Port ID of device. */
	uint16_t idx; /* Queue index. */
+	uint64_t ts_mask; /* Timestamp flag dynamic mask. */
+	int32_t ts_offset; /* Timestamp field dynamic offset. */
+	struct mlx5_dev_ctx_shared *sh; /* Shared context. */
	struct mlx5_txq_stats stats; /* TX queue counters. */
#ifndef RTE_ARCH_64
	rte_spinlock_t *uar_lock;
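For context, these new fields are consumed on the Tx datapath: the burst
routine tests ts_mask against the mbuf's ol_flags and, on a match, reads the
requested transmit time through ts_offset. A minimal sketch of that read
path, assuming the standard RTE_MBUF_DYNFIELD() accessor from rte_mbuf_dyn.h;
txq_get_ts() is an illustrative helper, not part of this patch:

#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

/* Illustrative helper: return the scheduled Tx time of a packet, or 0
 * when the application did not request scheduling for this mbuf.
 */
static inline uint64_t
txq_get_ts(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
{
	if (!(mb->ol_flags & txq->ts_mask))
		return 0; /* ts_mask is 0 when scheduling is disabled. */
	return *RTE_MBUF_DYNFIELD(mb, txq->ts_offset, uint64_t *);
}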
void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl);
void txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl);
uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev);
+void mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev);
/* mlx5_rxtx.c */
}
	/* Set a mask and offset of dynamic metadata flows into Rx queues. */
	mlx5_flow_rxq_dynf_metadata_set(dev);
+	/* Set a mask and offset of timestamp scheduling into Tx queues. */
+	mlx5_txq_dynf_timestamp_set(dev);
	/*
	 * In non-cached mode, only the default mreg copy action needs to
	 * be started; no flow created by the application exists anymore.
	}
	return ret;
}
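For completeness, the counterpart on the application side: the dynamic flag
and field that the lookup above resolves must be registered before the port
is started. A minimal sketch, assuming a DPDK release that provides
rte_mbuf_dyn_tx_timestamp_register(); the names ts_off, ts_flag and the two
helpers are illustrative:

#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

static int ts_off;       /* Offset of the dynamic timestamp field. */
static uint64_t ts_flag; /* Mask of the Tx timestamp dynamic flag. */

/* Register the Tx timestamp dynamic field and flag. Must run before
 * rte_eth_dev_start() so mlx5_txq_dynf_timestamp_set() can find them.
 */
static int
app_enable_tx_scheduling(void)
{
	return rte_mbuf_dyn_tx_timestamp_register(&ts_off, &ts_flag);
}

/* Request transmission of 'mb' at device clock value 'when'. */
static void
app_schedule_pkt(struct rte_mbuf *mb, uint64_t when)
{
	*RTE_MBUF_DYNFIELD(mb, ts_off, uint64_t *) = when;
	mb->ol_flags |= ts_flag;
}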
+
+/**
+ * Set the Tx queue dynamic timestamp (mask and offset).
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ */
+void
+mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
+	struct mlx5_txq_data *data;
+	int off, nbit;
+	unsigned int i;
+	uint64_t mask = 0;
+
+	nbit = rte_mbuf_dynflag_lookup
+				(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
+	off = rte_mbuf_dynfield_lookup
+				(RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
+	/* The lookups return a negative value if flag/field is unknown. */
+	if (nbit >= 0 && off >= 0 && sh->txpp.refcnt)
+		mask = 1ULL << nbit;
+	for (i = 0; i != priv->txqs_n; ++i) {
+		data = (*priv->txqs)[i];
+		if (!data)
+			continue;
+		data->sh = sh;
+		data->ts_mask = mask;
+		data->ts_offset = off;
+	}
+}
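Note the fail-safe behavior of the function above: mask stays zero unless
both dynamic lookups succeed and packet pacing is actually referenced
(sh->txpp.refcnt), so on ports without scheduling the per-packet test against
ts_mask can never match and ts_offset is never dereferenced. Caching sh in
every queue structure likewise gives the burst routines a direct pointer to
the shared context instead of an extra indirection through the private data.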