/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_ethdev_driver.h>

#include "otx2_ethdev.h"

#define PTP_FREQ_ADJUST (1 << 9)
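/* Deltas smaller in magnitude than PTP_FREQ_ADJUST are treated by
 * otx2_nix_timesync_adjust_time() below as frequency fine-adjust requests
 * (PTP_OP_ADJFINE) in addition to stepping the timecounters.
 */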
/* Function to enable ptp config for VFs */
void
otx2_nix_ptp_enable_vf(struct rte_eth_dev *eth_dev)
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);

        if (otx2_nix_recalc_mtu(eth_dev))
                otx2_err("Failed to set MTU size for ptp");

        dev->scalar_ena = true;
        dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;

        /* Setting up the function pointers as per new offload flags */
        otx2_eth_set_rx_function(eth_dev);
        otx2_eth_set_tx_function(eth_dev);
}
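/* One-shot Rx burst stub installed for VFs: enabling PTP needs a VF->PF
 * mbox message (MTU recalculation), which cannot be issued from within the
 * PF->VF mbox handler, so the actual enable is deferred to the first Rx
 * burst call on the VF.
 */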
static uint16_t
nix_eth_ptp_vf_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
{
        struct otx2_eth_rxq *rxq = queue;
        struct rte_eth_dev *eth_dev;

        RTE_SET_USED(mbufs);
        RTE_SET_USED(pkts);

        eth_dev = rxq->eth_dev;
        otx2_nix_ptp_enable_vf(eth_dev);

        return 0;
}
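/* Fetch a (PTP HI clock, tsc) sample pair from the AF over mbox. Either
 * output pointer may be NULL if the caller needs only one of the values.
 */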
static int
nix_read_raw_clock(struct otx2_eth_dev *dev, uint64_t *clock, uint64_t *tsc,
                   uint8_t is_pmu)
{
        struct otx2_mbox *mbox = dev->mbox;
        struct ptp_req *req;
        struct ptp_rsp *rsp;
        int rc;

        req = otx2_mbox_alloc_msg_ptp_op(mbox);
        req->op = PTP_OP_GET_CLOCK;
        req->is_pmu = is_pmu;
        rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
        if (rc)
                goto fail;

        if (clock)
                *clock = rsp->clk;
        if (tsc)
                *tsc = rsp->tsc;

fail:
        return rc;
}
/* This function calculates two parameters "clk_freq_mult" and
 * "clk_delta" which are useful in deriving the PTP HI clock from
 * the timestamp counter (tsc) value.
 */
int
otx2_nix_raw_clock_tsc_conv(struct otx2_eth_dev *dev)
{
        uint64_t ticks_base = 0, ticks = 0, tsc = 0, t_freq;
        int rc, val;

        /* Calculating the frequency at which PTP HI clock is running */
        rc = nix_read_raw_clock(dev, &ticks_base, &tsc, false);
        if (rc) {
                otx2_err("Failed to read the raw clock value: %d", rc);
                goto fail;
        }

        rte_delay_ms(100);

        rc = nix_read_raw_clock(dev, &ticks, &tsc, false);
        if (rc) {
                otx2_err("Failed to read the raw clock value: %d", rc);
                goto fail;
        }

        t_freq = (ticks - ticks_base) * 10;

        /* Calculating the freq multiplier viz. the ratio between the
         * frequency at which the PTP HI clock works and the tsc clock runs
         */
        dev->clk_freq_mult =
                (double)pow(10, floor(log10(t_freq))) / rte_get_timer_hz();

        val = false;
#ifdef RTE_ARM_EAL_RDTSC_USE_PMU
        val = true;
#endif

        rc = nix_read_raw_clock(dev, &ticks, &tsc, val);
        if (rc) {
                otx2_err("Failed to read the raw clock value: %d", rc);
                goto fail;
        }

        /* Calculating delta between PTP HI clock and tsc */
        dev->clk_delta = ((uint64_t)(ticks / dev->clk_freq_mult) - tsc);

fail:
        return rc;
}
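/* Worked example with illustrative (not measured) figures: suppose the PTP
 * HI counter advances by a little over 10^8 ticks during the 100 ms window,
 * so t_freq ~= 10^9 and pow(10, floor(log10(t_freq))) snaps it to 10^9.
 * With a 100 MHz tsc, clk_freq_mult = 10^9 / 10^8 = 10 and
 * clk_delta = ptp_hi / 10 - tsc, so that otx2_nix_read_clock() can later
 * recover ptp_hi ~= (tsc_now + clk_delta) * clk_freq_mult without an mbox
 * round trip.
 */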
static void
nix_start_timecounters(struct rte_eth_dev *eth_dev)
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);

        memset(&dev->systime_tc, 0, sizeof(struct rte_timecounter));
        memset(&dev->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
        memset(&dev->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));

        dev->systime_tc.cc_mask = OTX2_CYCLECOUNTER_MASK;
        dev->rx_tstamp_tc.cc_mask = OTX2_CYCLECOUNTER_MASK;
        dev->tx_tstamp_tc.cc_mask = OTX2_CYCLECOUNTER_MASK;
}
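/* Program HW timestamping through AF mbox: the NIX LF messages control
 * timestamping of transmitted PTP packets while the CGX messages control
 * timestamping of received PTP packets.
 */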
static int
nix_ptp_config(struct rte_eth_dev *eth_dev, int en)
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        struct otx2_mbox *mbox = dev->mbox;
        int rc = -EINVAL;

        if (otx2_dev_is_vf_or_sdp(dev) || otx2_dev_is_lbk(dev))
                return rc;

        if (en) {
                /* Enable time stamping of sent PTP packets. */
                otx2_mbox_alloc_msg_nix_lf_ptp_tx_enable(mbox);
                rc = otx2_mbox_process(mbox);
                if (rc) {
                        otx2_err("MBOX ptp tx conf enable failed: err %d", rc);
                        return rc;
                }

                /* Enable time stamping of received PTP packets. */
                otx2_mbox_alloc_msg_cgx_ptp_rx_enable(mbox);
        } else {
                /* Disable time stamping of sent PTP packets. */
                otx2_mbox_alloc_msg_nix_lf_ptp_tx_disable(mbox);
                rc = otx2_mbox_process(mbox);
                if (rc) {
                        otx2_err("MBOX ptp tx conf disable failed: err %d", rc);
                        return rc;
                }

                /* Disable time stamping of received PTP packets. */
                otx2_mbox_alloc_msg_cgx_ptp_rx_disable(mbox);
        }

        return otx2_mbox_process(mbox);
}
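/* Invoked while a PF->VF mbox notification of a PTP mode change is being
 * handled (see the comment in the VF branch below). Refreshes each Rx
 * queue's mbuf_initializer so the mbuf data offset accounts for the
 * in-buffer timestamp, and for VFs defers the remaining setup to
 * nix_eth_ptp_vf_burst().
 */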
int
otx2_eth_dev_ptp_info_update(struct otx2_dev *dev, bool ptp_en)
{
        struct otx2_eth_dev *otx2_dev = (struct otx2_eth_dev *)dev;
        struct rte_eth_dev *eth_dev;
        int i;

        if (!dev)
                return -EINVAL;

        eth_dev = otx2_dev->eth_dev;
        if (!eth_dev)
                return -EINVAL;

        otx2_dev->ptp_en = ptp_en;
        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[i];
                rxq->mbuf_initializer =
                        otx2_nix_rxq_mbuf_setup(otx2_dev,
                                                eth_dev->data->port_id);
        }
        if (otx2_dev_is_vf(otx2_dev) && !(otx2_dev_is_sdp(otx2_dev)) &&
            !(otx2_dev_is_lbk(otx2_dev))) {
                /* In case of VF, setting of MTU can't be done directly in this
                 * function as this is running as part of MBOX request(PF->VF)
                 * and MTU setting also requires MBOX message to be
                 * sent(VF->PF)
                 */
                eth_dev->rx_pkt_burst = nix_eth_ptp_vf_burst;
                rte_mb();
        }

        return 0;
}
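/* eth_dev op: enable IEEE 1588 timestamping. Reserves a DMA-able word for
 * the Tx timestamp, resets the timecounters, sets the Rx/Tx offload flags,
 * programs the HW through mbox and rebuilds descriptors and burst
 * functions accordingly.
 */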
int
otx2_nix_timesync_enable(struct rte_eth_dev *eth_dev)
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        int i, rc = 0;

        /* If we are VF/SDP/LBK, ptp cannot be enabled */
        if (otx2_dev_is_vf_or_sdp(dev) || otx2_dev_is_lbk(dev)) {
                otx2_info("PTP cannot be enabled in case of VF/SDP/LBK");
                return -EINVAL;
        }

        if (otx2_ethdev_is_ptp_en(dev)) {
                otx2_info("PTP mode is already enabled");
                return -EINVAL;
        }

        if (!(dev->rx_offload_flags & NIX_RX_OFFLOAD_PTYPE_F)) {
                otx2_err("Ptype offload is disabled, it should be enabled");
                return -EINVAL;
        }

        if (dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG) {
                otx2_err("Both PTP and switch header enabled");
                return -EINVAL;
        }

        /* Allocating an iova address for tx tstamp */
        const struct rte_memzone *ts;
        ts = rte_eth_dma_zone_reserve(eth_dev, "otx2_ts",
                                      0, OTX2_ALIGN, OTX2_ALIGN,
                                      dev->node);
        if (ts == NULL) {
                otx2_err("Failed to allocate mem for tx tstamp addr");
                return -ENOMEM;
        }

        dev->tstamp.tx_tstamp_iova = ts->iova;
        dev->tstamp.tx_tstamp = ts->addr;

        /* System time should be already on by default */
        nix_start_timecounters(eth_dev);

        dev->rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
        dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;
        dev->tx_offload_flags |= NIX_TX_OFFLOAD_TSTAMP_F;

        rc = nix_ptp_config(eth_dev, 1);
        if (!rc) {
                for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                        struct otx2_eth_txq *txq = eth_dev->data->tx_queues[i];
                        otx2_nix_form_default_desc(txq);
                }

                /* Setting up the function pointers as per new offload flags */
                otx2_eth_set_rx_function(eth_dev);
                otx2_eth_set_tx_function(eth_dev);
        }

        rc = otx2_nix_recalc_mtu(eth_dev);
        if (rc)
                otx2_err("Failed to set MTU size for ptp");

        return rc;
}
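/* Usage sketch (illustrative, application side): the callbacks in this file
 * are reached through the generic ethdev timesync API, roughly:
 *
 *      struct timespec ts;
 *      uint64_t clk;
 *
 *      rte_eth_timesync_enable(port_id);
 *      ...
 *      if (mbuf->ol_flags & PKT_RX_IEEE1588_TMST)
 *              rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0);
 *      rte_eth_timesync_read_tx_timestamp(port_id, &ts);
 *      rte_eth_timesync_adjust_time(port_id, delta);
 *      rte_eth_read_clock(port_id, &clk);
 *
 * Ptype parsing must be left enabled beforehand (see the checks in
 * otx2_nix_timesync_enable() above); the timestamp offload flags are set by
 * the enable callback itself.
 */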
int
otx2_nix_timesync_disable(struct rte_eth_dev *eth_dev)
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        int i, rc = 0;

        if (!otx2_ethdev_is_ptp_en(dev)) {
                otx2_nix_dbg("PTP mode is disabled");
                return -EINVAL;
        }

        if (otx2_dev_is_vf_or_sdp(dev) || otx2_dev_is_lbk(dev))
                return -EINVAL;

        dev->rx_offloads &= ~DEV_RX_OFFLOAD_TIMESTAMP;
        dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_TSTAMP_F;
        dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_TSTAMP_F;

        rc = nix_ptp_config(eth_dev, 0);
        if (!rc) {
                for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                        struct otx2_eth_txq *txq = eth_dev->data->tx_queues[i];
                        otx2_nix_form_default_desc(txq);
                }

                /* Setting up the function pointers as per new offload flags */
                otx2_eth_set_rx_function(eth_dev);
                otx2_eth_set_tx_function(eth_dev);
        }

        rc = otx2_nix_recalc_mtu(eth_dev);
        if (rc)
                otx2_err("Failed to set MTU size for ptp");

        return rc;
}
int
otx2_nix_timesync_read_rx_timestamp(struct rte_eth_dev *eth_dev,
                                    struct timespec *timestamp,
                                    uint32_t __rte_unused flags)
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        struct otx2_timesync_info *tstamp = &dev->tstamp;
        uint64_t ns;

        if (!tstamp->rx_ready)
                return -EINVAL;

        ns = rte_timecounter_update(&dev->rx_tstamp_tc, tstamp->rx_tstamp);
        *timestamp = rte_ns_to_timespec(ns);
        tstamp->rx_ready = 0;

        otx2_nix_dbg("rx timestamp: %"PRIu64" sec: %"PRIu64" nsec %"PRIu64"",
                     (uint64_t)tstamp->rx_tstamp, (uint64_t)timestamp->tv_sec,
                     (uint64_t)timestamp->tv_nsec);

        return 0;
}
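/* The Tx timestamp is written by HW into the tx_tstamp word reserved in
 * otx2_nix_timesync_enable(); zero means no new timestamp has been captured
 * since the last read, and the word is cleared once it has been consumed.
 */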
int
otx2_nix_timesync_read_tx_timestamp(struct rte_eth_dev *eth_dev,
                                    struct timespec *timestamp)
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        struct otx2_timesync_info *tstamp = &dev->tstamp;
        uint64_t ns;

        if (*tstamp->tx_tstamp == 0)
                return -EINVAL;

        ns = rte_timecounter_update(&dev->tx_tstamp_tc, *tstamp->tx_tstamp);
        *timestamp = rte_ns_to_timespec(ns);

        otx2_nix_dbg("tx timestamp: %"PRIu64" sec: %"PRIu64" nsec %"PRIu64"",
                     *tstamp->tx_tstamp, (uint64_t)timestamp->tv_sec,
                     (uint64_t)timestamp->tv_nsec);

        *tstamp->tx_tstamp = 0;
        rte_wmb();

        return 0;
}
int
otx2_nix_timesync_adjust_time(struct rte_eth_dev *eth_dev, int64_t delta)
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        struct otx2_mbox *mbox = dev->mbox;
        struct ptp_req *req;
        struct ptp_rsp *rsp;
        int rc;

        /* Adjust the frequency to make ticks increment at 10^9 ticks/sec */
        if (delta < PTP_FREQ_ADJUST && delta > -PTP_FREQ_ADJUST) {
                req = otx2_mbox_alloc_msg_ptp_op(mbox);
                req->op = PTP_OP_ADJFINE;
                req->scaled_ppm = delta;

                rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
                if (rc)
                        return rc;

                /* Since the frequency of the PTP comp register is tuned, the
                 * delta and freq mult calculation for deriving PTP_HI from
                 * the timestamp counter should be done again.
                 */
                rc = otx2_nix_raw_clock_tsc_conv(dev);
                if (rc)
                        otx2_err("Failed to calculate delta and freq mult");
        }

        dev->systime_tc.nsec += delta;
        dev->rx_tstamp_tc.nsec += delta;
        dev->tx_tstamp_tc.nsec += delta;

        return 0;
}
int
otx2_nix_timesync_write_time(struct rte_eth_dev *eth_dev,
                             const struct timespec *ts)
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        uint64_t ns;

        ns = rte_timespec_to_ns(ts);
        /* Set the time counters to a new value. */
        dev->systime_tc.nsec = ns;
        dev->rx_tstamp_tc.nsec = ns;
        dev->tx_tstamp_tc.nsec = ns;

        return 0;
}
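/* Read the current PTP time: fetch the PTP clock counter from the AF over
 * mbox and convert it to nanoseconds through the systime timecounter.
 */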
int
otx2_nix_timesync_read_time(struct rte_eth_dev *eth_dev, struct timespec *ts)
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        struct otx2_mbox *mbox = dev->mbox;
        struct ptp_req *req;
        struct ptp_rsp *rsp;
        uint64_t ns;
        int rc;

        req = otx2_mbox_alloc_msg_ptp_op(mbox);
        req->op = PTP_OP_GET_CLOCK;
        rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
        if (rc)
                return rc;

        ns = rte_timecounter_update(&dev->systime_tc, rsp->clk);
        *ts = rte_ns_to_timespec(ns);

        otx2_nix_dbg("PTP time read: %"PRIu64" .%09"PRIu64"",
                     (uint64_t)ts->tv_sec, (uint64_t)ts->tv_nsec);

        return 0;
}
int
otx2_nix_read_clock(struct rte_eth_dev *eth_dev, uint64_t *clock)
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);

        /* This API returns the raw PTP HI clock value. LFs do not have
         * direct access to the PTP registers, so fetching the value would
         * need an mbox message to the AF. Reading it that way for every
         * packet in the fastpath would be very expensive, hence the PTP HI
         * clock value is derived from the tsc using the freq_mult and
         * clk_delta calculated during the configure stage.
         */
        *clock = (rte_get_tsc_cycles() + dev->clk_delta) * dev->clk_freq_mult;

        return 0;
}