if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
flags |= NIX_RX_MULTI_SEG_F;
+ if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+ flags |= NIX_RX_OFFLOAD_TSTAMP_F;
+
if (!dev->ptype_disable)
flags |= NIX_RX_OFFLOAD_PTYPE_F;
flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
NIX_TX_OFFLOAD_L3_L4_CSUM_F);
+ if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+ flags |= NIX_TX_OFFLOAD_TSTAMP_F;
+
return flags;
}
static void
nix_form_default_desc(struct cnxk_eth_dev *dev, struct cn10k_eth_txq *txq,
uint16_t qid)
{
struct nix_send_ext_s *send_hdr_ext;
union nix_send_hdr_w0_u send_hdr_w0;
+ struct nix_send_mem_s *send_mem;
union nix_send_sg_s sg_w0;
- RTE_SET_USED(dev);
send_hdr_ext = (struct nix_send_ext_s *)&txq->cmd[0];
send_hdr_ext->w0.subdc = NIX_SUBDC_EXT;
+ if (dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F) {
+ /* Default: a single-seg packet takes
+ * 2(HDR) + 2(EXT) + 1(SG) + 1(IOVA) + 2(MEM) = 8 words
+ * => sizem1 = 8/2 - 1 = 3
+ */
+ send_hdr_w0.sizem1 = 3;
+ send_hdr_ext->w0.tstmp = 1;
+
+ /* The MEM descriptor is placed right after the two-word
+ * EXT header held in txq->cmd[0..1], i.e. at txq->cmd + 2
+ */
+ send_mem = (struct nix_send_mem_s *)(txq->cmd + 2);
+ send_mem->w0.subdc = NIX_SUBDC_MEM;
+ send_mem->w0.alg = NIX_SENDMEMALG_SETTSTMP;
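+ /* HW writes the Tx timestamp to this address on transmit */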
+ send_mem->addr = dev->tstamp.tx_tstamp_iova;
+ }
} else {
/* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
send_hdr_w0.sizem1 = 1;
rxq->wdata = cq->wdata;
rxq->head = cq->head;
rxq->qmask = cq->qmask;
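+ /* Timestamp state shared with the Rx fast path */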
+ rxq->tstamp = &dev->tstamp;
/* Data offset from data to start of mbuf is first_skip */
rxq->data_off = rq->first_skip;
return 0;
}
+/* Enable PTP configuration for VFs */
+static void
+nix_ptp_enable_vf(struct rte_eth_dev *eth_dev)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+
+ if (nix_recalc_mtu(eth_dev))
+ plt_err("Failed to set MTU size for PTP");
+
+ dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;
+
+ /* Setting up the function pointers as per new offload flags */
+ cn10k_eth_set_rx_function(eth_dev);
+ cn10k_eth_set_tx_function(eth_dev);
+}
+
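+/* One-shot Rx burst stub installed while VF PTP setup is pending: its
+ * first call performs the deferred MTU/offload reconfiguration (see
+ * cn10k_nix_ptp_info_update_cb), which also restores the real burst
+ * function, and it returns zero packets.
+ */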
+static uint16_t
+nix_ptp_vf_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
+{
+ struct cn10k_eth_rxq *rxq = queue;
+ struct cnxk_eth_rxq_sp *rxq_sp;
+ struct rte_eth_dev *eth_dev;
+
+ RTE_SET_USED(mbufs);
+ RTE_SET_USED(pkts);
+
+ rxq_sp = cnxk_eth_rxq_to_sp(rxq);
+ eth_dev = rxq_sp->dev->eth_dev;
+ nix_ptp_enable_vf(eth_dev);
+
+ return 0;
+}
+
+static int
+cn10k_nix_ptp_info_update_cb(struct roc_nix *nix, bool ptp_en)
+{
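+ /* roc_nix is the first member of cnxk_eth_dev, making this cast valid */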
+ struct cnxk_eth_dev *dev = (struct cnxk_eth_dev *)nix;
+ struct rte_eth_dev *eth_dev;
+ struct cn10k_eth_rxq *rxq;
+ int i;
+
+ if (!dev)
+ return -EINVAL;
+
+ eth_dev = dev->eth_dev;
+ if (!eth_dev)
+ return -EINVAL;
+
+ dev->ptp_en = ptp_en;
+
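+ /* Recompute mbuf initializer so data_off covers the 8-byte Rx timestamp */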
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ rxq = eth_dev->data->rx_queues[i];
+ rxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);
+ }
+
+ if (roc_nix_is_vf_or_sdp(nix) && !roc_nix_is_sdp(nix) &&
+ !roc_nix_is_lbk(nix)) {
+ /* For a VF, the MTU cannot be set directly here: this
+ * callback runs in the context of a PF->VF mbox request,
+ * while changing the MTU itself requires a VF->PF mbox
+ * message. Defer the setup to the first Rx burst call.
+ */
+ eth_dev->rx_pkt_burst = nix_ptp_vf_burst;
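+ /* Make the burst function switch visible before further Rx calls */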
+ rte_mb();
+ }
+
+ return 0;
+}
+
+static int
+cn10k_nix_timesync_enable(struct rte_eth_dev *eth_dev)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ int i, rc;
+
+ rc = cnxk_nix_timesync_enable(eth_dev);
+ if (rc)
+ return rc;
+
+ dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;
+ dev->tx_offload_flags |= NIX_TX_OFFLOAD_TSTAMP_F;
+
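+ /* Rebuild default Tx descriptors to add the timestamp MEM subdesc */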
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+ nix_form_default_desc(dev, eth_dev->data->tx_queues[i], i);
+
+ /* Update the Rx/Tx burst functions to reflect the new
+ * offload flags.
+ */
+ cn10k_eth_set_rx_function(eth_dev);
+ cn10k_eth_set_tx_function(eth_dev);
+ return 0;
+}
+
+static int
+cn10k_nix_timesync_disable(struct rte_eth_dev *eth_dev)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ int i, rc;
+
+ rc = cnxk_nix_timesync_disable(eth_dev);
+ if (rc)
+ return rc;
+
+ dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_TSTAMP_F;
+ dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_TSTAMP_F;
+
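+ /* Rebuild default Tx descriptors without the timestamp MEM subdesc */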
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+ nix_form_default_desc(dev, eth_dev->data->tx_queues[i], i);
+
+ /* Update the Rx/Tx burst functions to reflect the new
+ * offload flags.
+ */
+ cn10k_eth_set_rx_function(eth_dev);
+ cn10k_eth_set_tx_function(eth_dev);
+ return 0;
+}
+
static int
cn10k_nix_dev_start(struct rte_eth_dev *eth_dev)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *nix = &dev->nix;
int rc;
/* Common eth dev start */
rc = cnxk_nix_dev_start(eth_dev);
if (rc)
return rc;
+ /* If PTP is already enabled in the PF owning this VF, apply
+ * the 8-byte Rx data offset shift on the VF as well
+ */
+ if (dev->ptp_en && !roc_nix_is_pf(nix) && !roc_nix_is_sdp(nix))
+ nix_ptp_enable_vf(eth_dev);
+
/* Setting up the rx[tx]_offload_flags due to change
* in rx[tx]_offloads.
*/
cnxk_eth_dev_ops.tx_queue_stop = cn10k_nix_tx_queue_stop;
cnxk_eth_dev_ops.dev_start = cn10k_nix_dev_start;
cnxk_eth_dev_ops.dev_ptypes_set = cn10k_nix_ptypes_set;
+ cnxk_eth_dev_ops.timesync_enable = cn10k_nix_timesync_enable;
+ cnxk_eth_dev_ops.timesync_disable = cn10k_nix_timesync_disable;
}
static int
cn10k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
struct rte_eth_dev *eth_dev;
+ struct cnxk_eth_dev *dev;
int rc;
if (RTE_CACHE_LINE_SIZE != 64) {
plt_err("Driver not compiled for CN10K");
return -EFAULT;
}

/* Common probe */
rc = cnxk_nix_probe(pci_drv, pci_dev);
if (rc)
return rc;
- if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
- if (!eth_dev)
- return -ENOENT;
+ /* Find the allocated eth dev for this PCI device */
+ eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
+ if (!eth_dev)
+ return -ENOENT;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
/* Setup callbacks for secondary process */
cn10k_eth_set_tx_function(eth_dev);
cn10k_eth_set_rx_function(eth_dev);
+ return 0;
}
+
+ dev = cnxk_eth_pmd_priv(eth_dev);
+
+ /* Register mbox up-message callback for PTP updates from the PF */
+ roc_nix_ptp_info_cb_register(&dev->nix, cn10k_nix_ptp_info_update_cb);
+
return 0;
}