{
uint64_t capa = CNXK_NIX_RX_OFFLOAD_CAPA;
- if (roc_nix_is_vf_or_sdp(&dev->nix))
+ if (roc_nix_is_vf_or_sdp(&dev->nix) ||
+ dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG)
capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
return capa;
return speed_capa;
}
+static void
+nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
+{
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ struct rte_eth_dev *eth_dev;
+ struct cnxk_eth_dev *dev;
+ uint32_t buffsz;
+
+ dev = rxq->dev;
+ eth_dev = dev->eth_dev;
+
+ /* Get rx buffer size */
+ mbp_priv = rte_mempool_get_priv(rxq->qconf.mp);
+ buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+
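+ /* If the max Rx frame cannot fit into a single mbuf data area, the
+ * packet must be received as a chain of segments: enable the Rx
+ * scatter and Tx multi-seg offloads accordingly.
+ */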
+ if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
+ dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
+ dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ }
+}
+
+int
+nix_recalc_mtu(struct rte_eth_dev *eth_dev)
+{
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct cnxk_eth_rxq_sp *rxq;
+ uint16_t mtu;
+ int rc;
+
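+ /* The fast path queue area is laid out right after the slow path
+ * struct (see cnxk_nix_rx_queue_setup), so stepping back one struct
+ * from rx_queues[0] yields the cnxk_eth_rxq_sp.
+ */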
+ rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[0]) - 1;
+ /* Setup scatter mode if needed by jumbo */
+ nix_enable_mseg_on_jumbo(rxq);
+
+ /* Setup MTU based on max_rx_pkt_len */
+ mtu = data->dev_conf.rxmode.max_rx_pkt_len - CNXK_NIX_L2_OVERHEAD +
+ CNXK_NIX_MAX_VTAG_ACT_SIZE;
+
+ rc = cnxk_nix_mtu_set(eth_dev, mtu);
+ if (rc)
+ plt_err("Failed to set default MTU size, rc=%d", rc);
+
+ return rc;
+}
+
+static int
+nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct cnxk_fc_cfg *fc = &dev->fc_cfg;
+ struct rte_eth_fc_conf fc_conf = {0};
+ int rc;
+
+ /* Both Rx & Tx flow ctrl get enabled(RTE_FC_FULL) in HW
+ * by AF driver, update those info in PMD structure.
+ */
+ rc = cnxk_nix_flow_ctrl_get(eth_dev, &fc_conf);
+ if (rc)
+ goto exit;
+
+ fc->mode = fc_conf.mode;
+ fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
+ (fc_conf.mode == RTE_FC_RX_PAUSE);
+ fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
+ (fc_conf.mode == RTE_FC_TX_PAUSE);
+
+exit:
+ return rc;
+}
+
+static int
+nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct cnxk_fc_cfg *fc = &dev->fc_cfg;
+ struct rte_eth_fc_conf fc_cfg = {0};
+
+ if (roc_nix_is_vf_or_sdp(&dev->nix))
+ return 0;
+
+ fc_cfg.mode = fc->mode;
+
+ /* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
+ if (roc_model_is_cn96_ax() &&
+ dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG &&
+ (fc_cfg.mode == RTE_FC_FULL || fc_cfg.mode == RTE_FC_RX_PAUSE)) {
+ fc_cfg.mode =
+ (fc_cfg.mode == RTE_FC_FULL ||
+ fc_cfg.mode == RTE_FC_TX_PAUSE) ?
+ RTE_FC_TX_PAUSE : RTE_FC_NONE;
+ }
+
+ return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
+}
+
+uint64_t
+cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
+{
+ uint16_t port_id = dev->eth_dev->data->port_id;
+ struct rte_mbuf mb_def;
+ uint64_t *tmp;
+
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
+ offsetof(struct rte_mbuf, data_off) !=
+ 2);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
+ offsetof(struct rte_mbuf, data_off) !=
+ 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
+ offsetof(struct rte_mbuf, data_off) !=
+ 6);
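+ /* data_off, refcnt, nb_segs and port pack into the single 64-bit
+ * rearm_data word used by the Rx path to re-initialize mbufs. When
+ * PTP is enabled, data_off is bumped by CNXK_NIX_TIMESYNC_RX_OFFSET
+ * so the mbuf data pointer skips the HW-prepended timestamp.
+ */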
+ mb_def.nb_segs = 1;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM +
+ (dev->ptp_en * CNXK_NIX_TIMESYNC_RX_OFFSET);
+ mb_def.port = port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* Prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ tmp = (uint64_t *)&mb_def.rearm_data;
+
+ return *tmp;
+}
+
+static inline uint8_t
+nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
+{
+ /*
+ * A maximum of three segments can be supported with W8; choose
+ * NIX_MAXSQESZ_W16 for multi-segment offload.
+ */
+ if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ return NIX_MAXSQESZ_W16;
+ else
+ return NIX_MAXSQESZ_W8;
+}
+
+int
+cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
+ uint16_t nb_desc, uint16_t fp_tx_q_sz,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+ struct cnxk_eth_txq_sp *txq_sp;
+ struct roc_nix_sq *sq;
+ size_t txq_sz;
+ int rc;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (eth_dev->data->tx_queues[qid] != NULL) {
+ plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
+ dev_ops->tx_queue_release(eth_dev->data->tx_queues[qid]);
+ eth_dev->data->tx_queues[qid] = NULL;
+ }
+
+ /* Setup ROC SQ */
+ sq = &dev->sqs[qid];
+ sq->qid = qid;
+ sq->nb_desc = nb_desc;
+ sq->max_sqe_sz = nix_sq_max_sqe_sz(dev);
+
+ rc = roc_nix_sq_init(&dev->nix, sq);
+ if (rc) {
+ plt_err("Failed to init sq=%d, rc=%d", qid, rc);
+ return rc;
+ }
+
+ rc = -ENOMEM;
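+ /* Allocate the slow path struct followed by the HW-specific fast
+ * path area (fp_tx_q_sz bytes); tx_queues[] points past the slow
+ * path part (see below).
+ */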
+ txq_sz = sizeof(struct cnxk_eth_txq_sp) + fp_tx_q_sz;
+ txq_sp = plt_zmalloc(txq_sz, PLT_CACHE_LINE_SIZE);
+ if (!txq_sp) {
+ plt_err("Failed to alloc tx queue mem");
+ rc |= roc_nix_sq_fini(sq);
+ return rc;
+ }
+
+ txq_sp->dev = dev;
+ txq_sp->qid = qid;
+ txq_sp->qconf.conf.tx = *tx_conf;
+ txq_sp->qconf.nb_desc = nb_desc;
+
+ plt_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " lmt_addr=%p"
+ " nb_sqb_bufs=%d sqes_per_sqb_log2=%d",
+ qid, sq->fc, dev->tx_offloads, sq->lmt_addr,
+ sq->nb_sqb_bufs, sq->sqes_per_sqb_log2);
+
+ /* Store start of fast path area */
+ eth_dev->data->tx_queues[qid] = txq_sp + 1;
+ eth_dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
+ return 0;
+}
+
+static void
+cnxk_nix_tx_queue_release(void *txq)
+{
+ struct cnxk_eth_txq_sp *txq_sp;
+ struct cnxk_eth_dev *dev;
+ struct roc_nix_sq *sq;
+ uint16_t qid;
+ int rc;
+
+ if (!txq)
+ return;
+
+ txq_sp = cnxk_eth_txq_to_sp(txq);
+ dev = txq_sp->dev;
+ qid = txq_sp->qid;
+
+ plt_nix_dbg("Releasing txq %u", qid);
+
+ /* Cleanup ROC SQ */
+ sq = &dev->sqs[qid];
+ rc = roc_nix_sq_fini(sq);
+ if (rc)
+ plt_err("Failed to cleanup sq, rc=%d", rc);
+
+ /* Finally free */
+ plt_free(txq_sp);
+}
+
+int
+cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
+ uint16_t nb_desc, uint16_t fp_rx_q_sz,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct cnxk_eth_rxq_sp *rxq_sp;
+ struct rte_mempool_ops *ops;
+ const char *platform_ops;
+ struct roc_nix_rq *rq;
+ struct roc_nix_cq *cq;
+ uint16_t first_skip;
+ int rc = -EINVAL;
+ size_t rxq_sz;
+
+ /* Sanity checks */
+ if (rx_conf->rx_deferred_start == 1) {
+ plt_err("Deferred Rx start is not supported");
+ goto fail;
+ }
+
+ platform_ops = rte_mbuf_platform_mempool_ops();
+ /* This driver needs cnxk_npa mempool ops to work */
+ ops = rte_mempool_get_ops(mp->ops_index);
+ if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
+ plt_err("mempool ops should be of cnxk_npa type");
+ goto fail;
+ }
+
+ if (mp->pool_id == 0) {
+ plt_err("Invalid pool_id");
+ goto fail;
+ }
+
+ /* Free memory prior to re-allocation if needed */
+ if (eth_dev->data->rx_queues[qid] != NULL) {
+ const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+
+ plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
+ dev_ops->rx_queue_release(eth_dev->data->rx_queues[qid]);
+ eth_dev->data->rx_queues[qid] = NULL;
+ }
+
+ /* Setup ROC CQ */
+ cq = &dev->cqs[qid];
+ cq->qid = qid;
+ cq->nb_desc = nb_desc;
+ rc = roc_nix_cq_init(&dev->nix, cq);
+ if (rc) {
+ plt_err("Failed to init roc cq for rq=%d, rc=%d", qid, rc);
+ goto fail;
+ }
+
+ /* Setup ROC RQ */
+ rq = &dev->rqs[qid];
+ rq->qid = qid;
+ rq->aura_handle = mp->pool_id;
+ rq->flow_tag_width = 32;
+ rq->sso_ena = false;
+
+ /* Calculate first mbuf skip */
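+ /* HW skips the mbuf header, headroom and mempool private area
+ * before writing packet data into the first segment; subsequent
+ * segments skip only the mbuf header.
+ */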
+ first_skip = (sizeof(struct rte_mbuf));
+ first_skip += RTE_PKTMBUF_HEADROOM;
+ first_skip += rte_pktmbuf_priv_size(mp);
+ rq->first_skip = first_skip;
+ rq->later_skip = sizeof(struct rte_mbuf);
+ rq->lpb_size = mp->elt_size;
+
+ rc = roc_nix_rq_init(&dev->nix, rq, !!eth_dev->data->dev_started);
+ if (rc) {
+ plt_err("Failed to init roc rq for rq=%d, rc=%d", qid, rc);
+ goto cq_fini;
+ }
+
+ /* Allocate and setup fast path rx queue */
+ rc = -ENOMEM;
+ rxq_sz = sizeof(struct cnxk_eth_rxq_sp) + fp_rx_q_sz;
+ rxq_sp = plt_zmalloc(rxq_sz, PLT_CACHE_LINE_SIZE);
+ if (!rxq_sp) {
+ plt_err("Failed to alloc rx queue for rq=%d", qid);
+ goto rq_fini;
+ }
+
+ /* Setup slow path fields */
+ rxq_sp->dev = dev;
+ rxq_sp->qid = qid;
+ rxq_sp->qconf.conf.rx = *rx_conf;
+ rxq_sp->qconf.nb_desc = nb_desc;
+ rxq_sp->qconf.mp = mp;
+
+ plt_nix_dbg("rq=%d pool=%s nb_desc=%d->%d", qid, mp->name, nb_desc,
+ cq->nb_desc);
+
+ /* Store start of fast path area */
+ eth_dev->data->rx_queues[qid] = rxq_sp + 1;
+ eth_dev->data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ /* Calculate the delta and freq mult between the PTP HI clock and TSC.
+ * These are needed to derive the raw clock value from the TSC counter;
+ * the read_clock eth op returns the raw clock value.
+ */
+ if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
+ rc = cnxk_nix_tsc_convert(dev);
+ if (rc) {
+ plt_err("Failed to calculate delta and freq mult");
+ goto rq_fini;
+ }
+ }
+
+ return 0;
+rq_fini:
+ rc |= roc_nix_rq_fini(rq);
+cq_fini:
+ rc |= roc_nix_cq_fini(cq);
+fail:
+ return rc;
+}
+
+static void
+cnxk_nix_rx_queue_release(void *rxq)
+{
+ struct cnxk_eth_rxq_sp *rxq_sp;
+ struct cnxk_eth_dev *dev;
+ struct roc_nix_rq *rq;
+ struct roc_nix_cq *cq;
+ uint16_t qid;
+ int rc;
+
+ if (!rxq)
+ return;
+
+ rxq_sp = cnxk_eth_rxq_to_sp(rxq);
+ dev = rxq_sp->dev;
+ qid = rxq_sp->qid;
+
+ plt_nix_dbg("Releasing rxq %u", qid);
+
+ /* Cleanup ROC RQ */
+ rq = &dev->rqs[qid];
+ rc = roc_nix_rq_fini(rq);
+ if (rc)
+ plt_err("Failed to cleanup rq, rc=%d", rc);
+
+ /* Cleanup ROC CQ */
+ cq = &dev->cqs[qid];
+ rc = roc_nix_cq_fini(cq);
+ if (rc)
+ plt_err("Failed to cleanup cq, rc=%d", rc);
+
+ /* Finally free fast path area */
+ plt_free(rxq_sp);
+}
+
uint32_t
cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
uint8_t rss_level)
dev->ethdev_rss_hf = ethdev_rss;
- if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
+ if (ethdev_rss & ETH_RSS_L2_PAYLOAD &&
+ dev->npc.switch_header_type == ROC_PRIV_FLAGS_LEN_90B) {
flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
+ }
if (ethdev_rss & ETH_RSS_C_VLAN)
flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
struct rte_eth_rxmode *rxmode = &conf->rxmode;
struct rte_eth_txmode *txmode = &conf->txmode;
char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
+ struct roc_nix_fc_cfg fc_cfg = {0};
struct roc_nix *nix = &dev->nix;
struct rte_ether_addr *ea;
uint8_t nb_rxq, nb_txq;
goto fail_configure;
}
+ dev->npc.channel = roc_nix_get_base_chan(nix);
+
nb_rxq = data->nb_rx_queues;
nb_txq = data->nb_tx_queues;
rc = -ENOMEM;
roc_nix_err_intr_ena_dis(nix, true);
roc_nix_ras_intr_ena_dis(nix, true);
- if (nix->rx_ptp_ena) {
+ if (nix->rx_ptp_ena &&
+ dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG) {
plt_err("Both PTP and switch header enabled");
goto free_nix_lf;
}
+ rc = roc_nix_switch_hdr_set(nix, dev->npc.switch_header_type);
+ if (rc) {
+ plt_err("Failed to enable switch type nix_lf rc=%d", rc);
+ goto free_nix_lf;
+ }
+
/* Setup LSO if needed */
rc = nix_lso_fmt_setup(dev);
if (rc) {
goto cq_fini;
}
+ /* Init flow control configuration */
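+ /* With cq_cfg_valid = false this call configures Rx channel level
+ * backpressure rather than per-CQ flow control.
+ */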
+ fc_cfg.cq_cfg_valid = false;
+ fc_cfg.rxchan_cfg.enable = true;
+ rc = roc_nix_fc_config_set(nix, &fc_cfg);
+ if (rc) {
+ plt_err("Failed to initialize flow control rc=%d", rc);
+ goto cq_fini;
+ }
+
+ /* Update flow control configuration to PMD */
+ rc = nix_init_flow_ctrl_config(eth_dev);
+ if (rc) {
+ plt_err("Failed to initialize flow control rc=%d", rc);
+ goto cq_fini;
+ }
/*
* Restore queue config when reconfigure followed by
* reconfigure and no queue configure invoked from application case.
return rc;
}
+int
+cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct roc_nix_sq *sq = &dev->sqs[qid];
+ int rc = -EINVAL;
+
+ if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
+ return 0;
+
+ rc = roc_nix_tm_sq_aura_fc(sq, true);
+ if (rc) {
+ plt_err("Failed to enable sq aura fc, txq=%u, rc=%d", qid, rc);
+ goto done;
+ }
+
+ data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
+done:
+ return rc;
+}
+
+int
+cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct roc_nix_sq *sq = &dev->sqs[qid];
+ int rc;
+
+ if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
+ return 0;
+
+ rc = roc_nix_tm_sq_aura_fc(sq, false);
+ if (rc) {
+ plt_err("Failed to disable sqb aura fc, txq=%u, rc=%d", qid,
+ rc);
+ goto done;
+ }
+
+ data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
+done:
+ return rc;
+}
+
+static int
+cnxk_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct roc_nix_rq *rq = &dev->rqs[qid];
+ int rc;
+
+ if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
+ return 0;
+
+ rc = roc_nix_rq_ena_dis(rq, true);
+ if (rc) {
+ plt_err("Failed to enable rxq=%u, rc=%d", qid, rc);
+ goto done;
+ }
+
+ data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
+done:
+ return rc;
+}
+
+static int
+cnxk_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct roc_nix_rq *rq = &dev->rqs[qid];
+ int rc;
+
+ if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
+ return 0;
+
+ rc = roc_nix_rq_ena_dis(rq, false);
+ if (rc) {
+ plt_err("Failed to disable rxq=%u, rc=%d", qid, rc);
+ goto done;
+ }
+
+ data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
+done:
+ return rc;
+}
+
+static int
+cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+ struct rte_mbuf *rx_pkts[32];
+ int count, i, j, rc;
+ void *rxq;
+
+ /* Disable switch hdr pkind */
+ roc_nix_switch_hdr_set(&dev->nix, 0);
+
+ /* Stop link change events */
+ if (!roc_nix_is_vf_or_sdp(&dev->nix))
+ roc_nix_mac_link_event_start_stop(&dev->nix, false);
+
+ /* Disable Rx via NPC */
+ roc_nix_npc_rx_ena_dis(&dev->nix, false);
+
+ /* Stop rx queues and free up pkts pending */
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ rc = dev_ops->rx_queue_stop(eth_dev, i);
+ if (rc)
+ continue;
+
+ rxq = eth_dev->data->rx_queues[i];
+ count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
+ while (count) {
+ for (j = 0; j < count; j++)
+ rte_pktmbuf_free(rx_pkts[j]);
+ count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
+ }
+ }
+
+ /* Stop tx queues */
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+ dev_ops->tx_queue_stop(eth_dev, i);
+
+ return 0;
+}
+
+int
+cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ int rc, i;
+
+ if (eth_dev->data->nb_rx_queues != 0 && !dev->ptp_en) {
+ rc = nix_recalc_mtu(eth_dev);
+ if (rc)
+ return rc;
+ }
+
+ /* Start rx queues */
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ rc = cnxk_nix_rx_queue_start(eth_dev, i);
+ if (rc)
+ return rc;
+ }
+
+ /* Start tx queues */
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ rc = cnxk_nix_tx_queue_start(eth_dev, i);
+ if (rc)
+ return rc;
+ }
+
+ /* Update Flow control configuration */
+ rc = nix_update_flow_ctrl_config(eth_dev);
+ if (rc) {
+ plt_err("Failed to enable flow control. error code(%d)", rc);
+ return rc;
+ }
+
+ /* Enable Rx in NPC */
+ rc = roc_nix_npc_rx_ena_dis(&dev->nix, true);
+ if (rc) {
+ plt_err("Failed to enable NPC rx %d", rc);
+ return rc;
+ }
+
+ cnxk_nix_toggle_flag_link_cfg(dev, true);
+
+ /* Start link change events */
+ if (!roc_nix_is_vf_or_sdp(&dev->nix)) {
+ rc = roc_nix_mac_link_event_start_stop(&dev->nix, true);
+ if (rc) {
+ plt_err("Failed to start cgx link event %d", rc);
+ goto rx_disable;
+ }
+ }
+
+ /* Enable PTP if it was requested by the user or is already
+ * enabled on the PF that owns this VF
+ */
+ memset(&dev->tstamp, 0, sizeof(struct cnxk_timesync_info));
+ if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
+ cnxk_eth_dev_ops.timesync_enable(eth_dev);
+ else
+ cnxk_eth_dev_ops.timesync_disable(eth_dev);
+
+ if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+ rc = rte_mbuf_dyn_rx_timestamp_register
+ (&dev->tstamp.tstamp_dynfield_offset,
+ &dev->tstamp.rx_tstamp_dynflag);
+ if (rc != 0) {
+ plt_err("Failed to register Rx timestamp field/flag");
+ goto rx_disable;
+ }
+ }
+
+ cnxk_nix_toggle_flag_link_cfg(dev, false);
+
+ return 0;
+
+rx_disable:
+ roc_nix_npc_rx_ena_dis(&dev->nix, false);
+ cnxk_nix_toggle_flag_link_cfg(dev, false);
+ return rc;
+}
+
+static int cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev);
+static int cnxk_nix_dev_close(struct rte_eth_dev *eth_dev);
+
/* CNXK platform independent eth dev ops */
struct eth_dev_ops cnxk_eth_dev_ops = {
+ .mtu_set = cnxk_nix_mtu_set,
+ .mac_addr_add = cnxk_nix_mac_addr_add,
+ .mac_addr_remove = cnxk_nix_mac_addr_del,
+ .mac_addr_set = cnxk_nix_mac_addr_set,
.dev_infos_get = cnxk_nix_info_get,
+ .link_update = cnxk_nix_link_update,
+ .tx_queue_release = cnxk_nix_tx_queue_release,
+ .rx_queue_release = cnxk_nix_rx_queue_release,
+ .dev_stop = cnxk_nix_dev_stop,
+ .dev_close = cnxk_nix_dev_close,
+ .dev_reset = cnxk_nix_dev_reset,
+ .tx_queue_start = cnxk_nix_tx_queue_start,
+ .rx_queue_start = cnxk_nix_rx_queue_start,
+ .rx_queue_stop = cnxk_nix_rx_queue_stop,
+ .dev_supported_ptypes_get = cnxk_nix_supported_ptypes_get,
+ .promiscuous_enable = cnxk_nix_promisc_enable,
+ .promiscuous_disable = cnxk_nix_promisc_disable,
+ .allmulticast_enable = cnxk_nix_allmulticast_enable,
+ .allmulticast_disable = cnxk_nix_allmulticast_disable,
+ .rx_burst_mode_get = cnxk_nix_rx_burst_mode_get,
+ .tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
+ .flow_ctrl_get = cnxk_nix_flow_ctrl_get,
+ .flow_ctrl_set = cnxk_nix_flow_ctrl_set,
+ .dev_set_link_up = cnxk_nix_set_link_up,
+ .dev_set_link_down = cnxk_nix_set_link_down,
+ .get_module_info = cnxk_nix_get_module_info,
+ .get_module_eeprom = cnxk_nix_get_module_eeprom,
+ .rx_queue_intr_enable = cnxk_nix_rx_queue_intr_enable,
+ .rx_queue_intr_disable = cnxk_nix_rx_queue_intr_disable,
+ .pool_ops_supported = cnxk_nix_pool_ops_supported,
+ .queue_stats_mapping_set = cnxk_nix_queue_stats_mapping,
+ .stats_get = cnxk_nix_stats_get,
+ .stats_reset = cnxk_nix_stats_reset,
+ .xstats_get = cnxk_nix_xstats_get,
+ .xstats_get_names = cnxk_nix_xstats_get_names,
+ .xstats_reset = cnxk_nix_xstats_reset,
+ .xstats_get_by_id = cnxk_nix_xstats_get_by_id,
+ .xstats_get_names_by_id = cnxk_nix_xstats_get_names_by_id,
+ .fw_version_get = cnxk_nix_fw_version_get,
+ .rxq_info_get = cnxk_nix_rxq_info_get,
+ .txq_info_get = cnxk_nix_txq_info_get,
+ .tx_done_cleanup = cnxk_nix_tx_done_cleanup,
+ .flow_ops_get = cnxk_nix_flow_ops_get,
+ .get_reg = cnxk_nix_dev_get_reg,
+ .timesync_read_rx_timestamp = cnxk_nix_timesync_read_rx_timestamp,
+ .timesync_read_tx_timestamp = cnxk_nix_timesync_read_tx_timestamp,
+ .timesync_read_time = cnxk_nix_timesync_read_time,
+ .timesync_write_time = cnxk_nix_timesync_write_time,
+ .timesync_adjust_time = cnxk_nix_timesync_adjust_time,
+ .read_clock = cnxk_nix_read_clock,
+ .reta_update = cnxk_nix_reta_update,
+ .reta_query = cnxk_nix_reta_query,
+ .rss_hash_update = cnxk_nix_rss_hash_update,
+ .rss_hash_conf_get = cnxk_nix_rss_hash_conf_get,
+ .set_mc_addr_list = cnxk_nix_mc_addr_list_configure,
};
static int
/* Initialize base roc nix */
nix->pci_dev = pci_dev;
+ nix->hw_vlan_ins = true;
rc = roc_nix_dev_init(nix);
if (rc) {
plt_err("Failed to initialize roc nix rc=%d", rc);
goto error;
}
+ /* Register up msg callbacks */
+ roc_nix_mac_link_cb_register(nix, cnxk_eth_dev_link_status_cb);
+
dev->eth_dev = eth_dev;
dev->configured = 0;
+ dev->ptype_disable = 0;
/* For vfs, returned max_entries will be 0. but to keep default mac
* address, one entry must be allocated. so setting up to 1.
}
dev->max_mac_entries = max_entries;
+ dev->dmac_filter_count = 1;
/* Get mac address */
rc = roc_nix_npc_mac_addr_get(nix, dev->mac_addr);
dev->speed_capa = nix_get_speed_capa(dev);
/* Initialize roc npc */
+ dev->npc.roc_nix = nix;
+ rc = roc_npc_init(&dev->npc);
+ if (rc)
+ goto free_mac_addrs;
+
plt_nix_dbg("Port=%d pf=%d vf=%d ver=%s hwcap=0x%" PRIx64
" rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
eth_dev->data->port_id, roc_nix_get_pf(nix),
}
static int
-cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
+cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
roc_nix_npc_rx_ena_dis(nix, false);
+ /* Disable and free rte_flow entries */
+ roc_npc_fini(&dev->npc);
+
+ /* Disable link status events */
+ roc_nix_mac_link_event_start_stop(nix, false);
+
/* Free up SQs */
for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
dev_ops->tx_queue_release(eth_dev->data->tx_queues[i]);
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
- /* Check if mbox close is needed */
- if (!mbox_close)
- return 0;
-
rc = roc_nix_dev_fini(nix);
/* Can be freed later by PMD if NPA LF is in use */
if (rc == -EAGAIN) {
- eth_dev->data->dev_private = NULL;
+ if (!reset)
+ eth_dev->data->dev_private = NULL;
return 0;
} else if (rc) {
plt_err("Failed in nix dev fini, rc=%d", rc);
return rc;
}
+static int
+cnxk_nix_dev_close(struct rte_eth_dev *eth_dev)
+{
+ cnxk_eth_dev_uninit(eth_dev, false);
+ return 0;
+}
+
+static int
+cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev)
+{
+ int rc;
+
+ rc = cnxk_eth_dev_uninit(eth_dev, true);
+ if (rc)
+ return rc;
+
+ return cnxk_eth_dev_init(eth_dev);
+}
+
int
cnxk_nix_remove(struct rte_pci_device *pci_dev)
{
eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
if (eth_dev) {
/* Cleanup eth dev */
- rc = cnxk_eth_dev_uninit(eth_dev, true);
+ rc = cnxk_eth_dev_uninit(eth_dev, false);
if (rc)
return rc;