}
}
-static int
+int
nix_recalc_mtu(struct rte_eth_dev *eth_dev)
{
struct rte_eth_dev_data *data = eth_dev->data;
offsetof(struct rte_mbuf, data_off) !=
6);
mb_def.nb_segs = 1;
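+ /* When PTP is enabled, advance data_off by CNXK_NIX_TIMESYNC_RX_OFFSET
+ * so the mbuf data pointer starts past the in-band Rx timestamp area.
+ */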
- mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM +
+ (dev->ptp_en * CNXK_NIX_TIMESYNC_RX_OFFSET);
mb_def.port = port_id;
rte_mbuf_refcnt_set(&mb_def, 1);
eth_dev->data->rx_queues[qid] = rxq_sp + 1;
eth_dev->data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
+ /* Calculate the delta and frequency multiplier between the PTP HI clock
+ * and the TSC. These are needed to derive the raw clock value from the
+ * TSC counter; the read_clock eth op returns this raw clock value.
+ */
+ if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
+ rc = cnxk_nix_tsc_convert(dev);
+ if (rc) {
+ plt_err("Failed to calculate delta and freq mult");
+ goto rq_fini;
+ }
+ }
+
return 0;
rq_fini:
rc |= roc_nix_rq_fini(rq);
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
struct rte_mbuf *rx_pkts[32];
+ struct rte_eth_link link;
int count, i, j, rc;
void *rxq;
for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
dev_ops->tx_queue_stop(eth_dev, i);
+ /* Bring down link status internally */
+ memset(&link, 0, sizeof(link));
+ rte_eth_linkstatus_set(eth_dev, &link);
+
return 0;
}
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
int rc, i;
- if (eth_dev->data->nb_rx_queues != 0) {
+ if (eth_dev->data->nb_rx_queues != 0 && !dev->ptp_en) {
rc = nix_recalc_mtu(eth_dev);
if (rc)
return rc;
}
}
+ /* Enable PTP if it is requested by the user or is already
+ * enabled on the PF owning this VF.
+ */
+ memset(&dev->tstamp, 0, sizeof(struct cnxk_timesync_info));
+ if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
+ cnxk_eth_dev_ops.timesync_enable(eth_dev);
+ else
+ cnxk_eth_dev_ops.timesync_disable(eth_dev);
+
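+ /* Register the mbuf dynamic field and flag used to deliver the Rx
+ * timestamp to the application.
+ */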
+ if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+ rc = rte_mbuf_dyn_rx_timestamp_register
+ (&dev->tstamp.tstamp_dynfield_offset,
+ &dev->tstamp.rx_tstamp_dynflag);
+ if (rc != 0) {
+ plt_err("Failed to register Rx timestamp field/flag");
+ goto rx_disable;
+ }
+ }
+
cnxk_nix_toggle_flag_link_cfg(dev, false);
return 0;
.xstats_reset = cnxk_nix_xstats_reset,
.xstats_get_by_id = cnxk_nix_xstats_get_by_id,
.xstats_get_names_by_id = cnxk_nix_xstats_get_names_by_id,
+ .fw_version_get = cnxk_nix_fw_version_get,
.rxq_info_get = cnxk_nix_rxq_info_get,
.txq_info_get = cnxk_nix_txq_info_get,
.tx_done_cleanup = cnxk_nix_tx_done_cleanup,
.flow_ops_get = cnxk_nix_flow_ops_get,
+ .get_reg = cnxk_nix_dev_get_reg,
+ .timesync_read_rx_timestamp = cnxk_nix_timesync_read_rx_timestamp,
+ .timesync_read_tx_timestamp = cnxk_nix_timesync_read_tx_timestamp,
+ .timesync_read_time = cnxk_nix_timesync_read_time,
+ .timesync_write_time = cnxk_nix_timesync_write_time,
+ .timesync_adjust_time = cnxk_nix_timesync_adjust_time,
+ .read_clock = cnxk_nix_read_clock,
+ .reta_update = cnxk_nix_reta_update,
+ .reta_query = cnxk_nix_reta_query,
+ .rss_hash_update = cnxk_nix_rss_hash_update,
+ .rss_hash_conf_get = cnxk_nix_rss_hash_conf_get,
+ .set_mc_addr_list = cnxk_nix_mc_addr_list_configure,
+ .set_queue_rate_limit = cnxk_nix_tm_set_queue_rate_limit,
+ .tm_ops_get = cnxk_nix_tm_ops_get,
};
static int
/* Initialize base roc nix */
nix->pci_dev = pci_dev;
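+ /* Enable HW VLAN insertion in the base roc nix */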
+ nix->hw_vlan_ins = true;
rc = roc_nix_dev_init(nix);
if (rc) {
plt_err("Failed to initialize roc nix rc=%d", rc);
/* Register up msg callbacks */
roc_nix_mac_link_cb_register(nix, cnxk_eth_dev_link_status_cb);
+ /* Register link info get callback */
+ roc_nix_mac_link_info_get_cb_register(nix,
+ cnxk_eth_dev_link_status_get_cb);
+
dev->eth_dev = eth_dev;
dev->configured = 0;
dev->ptype_disable = 0;
}
dev->max_mac_entries = max_entries;
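+ /* The default MAC address consumes the first DMAC filter entry */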
+ dev->dmac_filter_count = 1;
/* Get mac address */
rc = roc_nix_npc_mac_addr_get(nix, dev->mac_addr);
/* Disable link status events */
roc_nix_mac_link_event_start_stop(nix, false);
+ /* Unregister the link update op; this is required to stop VFs from
+ * receiving link status updates on the exit path.
+ */
+ roc_nix_mac_link_cb_unregister(nix);
+
/* Free up SQs */
for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
dev_ops->tx_queue_release(eth_dev->data->tx_queues[i]);