if (roc_nix_is_vf_or_sdp(&dev->nix) ||
dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG)
- capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+ capa &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
return capa;
}
uint32_t speed_capa;
/* Auto negotiation disabled */
- speed_capa = ETH_LINK_SPEED_FIXED;
+ speed_capa = RTE_ETH_LINK_SPEED_FIXED;
if (!roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) {
- speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
- ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
- ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
+ speed_capa |= RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+ RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
+ RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
}
return speed_capa;
}
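+/* Switch inbound inline IPsec handling between the ethdev RQs and the
+ * inline device, and refresh the SA base in lookup mem to match. Poll
+ * mode defaults to the inline device; event mode overrides this when
+ * its RQs are set up.
+ */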
+int
+cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev)
+{
+ struct roc_nix *nix = &dev->nix;
+
+ if (dev->inb.inl_dev == use_inl_dev)
+ return 0;
+
+ plt_nix_dbg("Security sessions(%u) still active, inl=%u!!!",
+ dev->inb.nb_sess, !!dev->inb.inl_dev);
+
+ /* Change the mode */
+ dev->inb.inl_dev = use_inl_dev;
+
+ /* Update RoC for NPC rule insertion */
+ roc_nix_inb_mode_set(nix, use_inl_dev);
+
+ /* Setup lookup mem */
+ return cnxk_nix_lookup_mem_sa_base_set(dev);
+}
+
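+/* One-time inline IPsec setup driven by the Rx/Tx SECURITY offloads:
+ * inbound SA support (optionally via the inline device) for Rx, and
+ * the outbound LF plus an SA index bitmap for Tx security sessions.
+ */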
+static int
+nix_security_setup(struct cnxk_eth_dev *dev)
+{
+ struct roc_nix *nix = &dev->nix;
+ int i, rc = 0;
+
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
+ /* Setup Inline Inbound */
+ rc = roc_nix_inl_inb_init(nix);
+ if (rc) {
+ plt_err("Failed to initialize nix inline inb, rc=%d",
+ rc);
+ return rc;
+ }
+
+ /* By default, pick the inline device for poll mode.
+ * This is overridden when event mode RQs are set up.
+ */
+ cnxk_nix_inb_mode_set(dev, true);
+ }
+
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
+ dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
+ struct plt_bitmap *bmap;
+ size_t bmap_sz;
+ void *mem;
+
+ /* Setup enough descriptors for all tx queues */
+ nix->outb_nb_desc = dev->outb.nb_desc;
+ nix->outb_nb_crypto_qs = dev->outb.nb_crypto_qs;
+
+ /* Setup Inline Outbound */
+ rc = roc_nix_inl_outb_init(nix);
+ if (rc) {
+ plt_err("Failed to initialize nix inline outb, rc=%d",
+ rc);
+ goto cleanup;
+ }
+
+ dev->outb.lf_base = roc_nix_inl_outb_lf_base_get(nix);
+
+ /* Skip the rest if RTE_ETH_TX_OFFLOAD_SECURITY is not enabled */
+ if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY))
+ goto done;
+
+ rc = -ENOMEM;
+ /* Allocate a bitmap used to allocate and free SA indexes */
+ bmap_sz = plt_bitmap_get_memory_footprint(dev->outb.max_sa);
+ mem = plt_zmalloc(bmap_sz, PLT_CACHE_LINE_SIZE);
+ if (mem == NULL) {
+ plt_err("Outbound SA bmap alloc failed");
+
+ rc |= roc_nix_inl_outb_fini(nix);
+ goto cleanup;
+ }
+
+ rc = -EIO;
+ bmap = plt_bitmap_init(dev->outb.max_sa, mem, bmap_sz);
+ if (!bmap) {
+ plt_err("Outbound SA bmap init failed");
+
+ rc |= roc_nix_inl_outb_fini(nix);
+ plt_free(mem);
+ goto cleanup;
+ }
+
+ for (i = 0; i < dev->outb.max_sa; i++)
+ plt_bitmap_set(bmap, i);
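+
+ /* All indexes start out free (bit set). A session create is
+ * expected to claim one roughly as below (a sketch; the real
+ * helpers live in the session code) and session destroy sets
+ * the bit again:
+ *
+ *	uint64_t slab = 0;
+ *	uint32_t pos = 0, sa_idx;
+ *
+ *	if (plt_bitmap_scan(bmap, &pos, &slab)) {
+ *		sa_idx = pos + __builtin_ctzll(slab);
+ *		plt_bitmap_clear(bmap, sa_idx);
+ *	}
+ */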
+
+ dev->outb.sa_base = roc_nix_inl_outb_sa_base_get(nix);
+ dev->outb.sa_bmap_mem = mem;
+ dev->outb.sa_bmap = bmap;
+ }
+
+done:
+ return 0;
+cleanup:
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
+ rc |= roc_nix_inl_inb_fini(nix);
+ return rc;
+}
+
+static int
+nix_meter_fini(struct cnxk_eth_dev *dev)
+{
+ struct cnxk_meter_node *next_mtr = NULL;
+ struct roc_nix_bpf_objs profs = {0};
+ struct cnxk_meter_node *mtr = NULL;
+ struct cnxk_mtr *fms = &dev->mtr;
+ struct roc_nix *nix = &dev->nix;
+ struct roc_nix_rq *rq;
+ uint32_t i;
+ int rc = 0;
+
+ RTE_TAILQ_FOREACH_SAFE(mtr, fms, next, next_mtr) {
+ for (i = 0; i < mtr->rq_num; i++) {
+ rq = &dev->rqs[mtr->rq_id[i]];
+ rc |= roc_nix_bpf_ena_dis(nix, mtr->bpf_id, rq, false);
+ }
+
+ profs.level = mtr->level;
+ profs.count = 1;
+ profs.ids[0] = mtr->bpf_id;
+ rc = roc_nix_bpf_free(nix, &profs, 1);
+
+ if (rc)
+ return rc;
+
+ TAILQ_REMOVE(fms, mtr, next);
+ plt_free(mtr);
+ }
+ return 0;
+}
+
+static int
+nix_security_release(struct cnxk_eth_dev *dev)
+{
+ struct rte_eth_dev *eth_dev = dev->eth_dev;
+ struct cnxk_eth_sec_sess *eth_sec, *tvar;
+ struct roc_nix *nix = &dev->nix;
+ int rc, ret = 0;
+
+ /* Cleanup Inline inbound */
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
+ /* Destroy inbound sessions */
+ tvar = NULL;
+ RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->inb.list, entry, tvar)
+ cnxk_eth_sec_ops.session_destroy(eth_dev,
+ eth_sec->sess);
+
+ /* Clear lookup mem */
+ cnxk_nix_lookup_mem_sa_base_clear(dev);
+
+ rc = roc_nix_inl_inb_fini(nix);
+ if (rc)
+ plt_err("Failed to cleanup nix inline inb, rc=%d", rc);
+ ret |= rc;
+ }
+
+ /* Cleanup Inline outbound */
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
+ dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
+ /* Destroy outbound sessions */
+ tvar = NULL;
+ RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->outb.list, entry, tvar)
+ cnxk_eth_sec_ops.session_destroy(eth_dev,
+ eth_sec->sess);
+
+ rc = roc_nix_inl_outb_fini(nix);
+ if (rc)
+ plt_err("Failed to cleanup nix inline outb, rc=%d", rc);
+ ret |= rc;
+
+ plt_bitmap_free(dev->outb.sa_bmap);
+ plt_free(dev->outb.sa_bmap_mem);
+ dev->outb.sa_bmap = NULL;
+ dev->outb.sa_bmap_mem = NULL;
+ }
+
+ dev->inb.inl_dev = false;
+ roc_nix_inb_mode_set(nix, false);
+ dev->nb_rxq_sso = 0;
+ dev->inb.nb_sess = 0;
+ dev->outb.nb_sess = 0;
+ return ret;
+}
+
static void
nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
{
mbp_priv = rte_mempool_get_priv(rxq->qconf.mp);
buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
- if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
- dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
- dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ if (eth_dev->data->mtu + (uint32_t)CNXK_NIX_L2_OVERHEAD > buffsz) {
+ dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
+ dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
}
}
{
struct rte_eth_dev_data *data = eth_dev->data;
struct cnxk_eth_rxq_sp *rxq;
- uint16_t mtu;
int rc;
rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[0]) - 1;
/* Setup scatter mode if needed by jumbo */
nix_enable_mseg_on_jumbo(rxq);
- /* Setup MTU based on max_rx_pkt_len */
- mtu = data->dev_conf.rxmode.max_rx_pkt_len - CNXK_NIX_L2_OVERHEAD +
- CNXK_NIX_MAX_VTAG_ACT_SIZE;
-
- rc = cnxk_nix_mtu_set(eth_dev, mtu);
+ rc = cnxk_nix_mtu_set(eth_dev, data->mtu);
if (rc)
plt_err("Failed to set default MTU size, rc=%d", rc);
struct rte_eth_fc_conf fc_conf = {0};
int rc;
- /* Both Rx & Tx flow ctrl get enabled(RTE_FC_FULL) in HW
+ /* Both Rx & Tx flow ctrl get enabled(RTE_ETH_FC_FULL) in HW
* by AF driver, update those info in PMD structure.
*/
rc = cnxk_nix_flow_ctrl_get(eth_dev, &fc_conf);
goto exit;
fc->mode = fc_conf.mode;
- fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
- (fc_conf.mode == RTE_FC_RX_PAUSE);
- fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
- (fc_conf.mode == RTE_FC_TX_PAUSE);
+ fc->rx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+ (fc_conf.mode == RTE_ETH_FC_RX_PAUSE);
+ fc->tx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+ (fc_conf.mode == RTE_ETH_FC_TX_PAUSE);
exit:
return rc;
/* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
if (roc_model_is_cn96_ax() &&
dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG &&
- (fc_cfg.mode == RTE_FC_FULL || fc_cfg.mode == RTE_FC_RX_PAUSE)) {
+ (fc_cfg.mode == RTE_ETH_FC_FULL || fc_cfg.mode == RTE_ETH_FC_RX_PAUSE)) {
fc_cfg.mode =
- (fc_cfg.mode == RTE_FC_FULL ||
- fc_cfg.mode == RTE_FC_TX_PAUSE) ?
- RTE_FC_TX_PAUSE : RTE_FC_NONE;
+ (fc_cfg.mode == RTE_ETH_FC_FULL ||
+ fc_cfg.mode == RTE_ETH_FC_TX_PAUSE) ?
+ RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE;
}
return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
* Maximum three segments can be supported with W8. Choose
* NIX_MAXSQESZ_W16 for multi-segment offload.
*/
- if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
return NIX_MAXSQESZ_W16;
else
return NIX_MAXSQESZ_W8;
/* Free memory prior to re-allocation if needed. */
if (eth_dev->data->tx_queues[qid] != NULL) {
plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
- dev_ops->tx_queue_release(eth_dev->data->tx_queues[qid]);
+ dev_ops->tx_queue_release(eth_dev, qid);
eth_dev->data->tx_queues[qid] = NULL;
}
+ /* When Tx Security offload is enabled, increase tx desc count by
+ * max possible outbound desc count.
+ */
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
+ nb_desc += dev->outb.nb_desc;
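+ /* The extra room matches nix->outb_nb_desc programmed into the
+ * ROC layer by nix_security_setup().
+ */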
+
/* Setup ROC SQ */
sq = &dev->sqs[qid];
sq->qid = qid;
txq_sp->dev = dev;
txq_sp->qid = qid;
txq_sp->qconf.conf.tx = *tx_conf;
+ /* Queue config should reflect global offloads */
+ txq_sp->qconf.conf.tx.offloads = dev->tx_offloads;
txq_sp->qconf.nb_desc = nb_desc;
plt_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " lmt_addr=%p"
}
static void
-cnxk_nix_tx_queue_release(void *txq)
+cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
{
+ void *txq = eth_dev->data->tx_queues[qid];
struct cnxk_eth_txq_sp *txq_sp;
struct cnxk_eth_dev *dev;
struct roc_nix_sq *sq;
- uint16_t qid;
int rc;
if (!txq)
return;
txq_sp = cnxk_eth_txq_to_sp(txq);
+
dev = txq_sp->dev;
- qid = txq_sp->qid;
plt_nix_dbg("Releasing txq %u", qid);
struct rte_mempool *mp)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *nix = &dev->nix;
struct cnxk_eth_rxq_sp *rxq_sp;
struct rte_mempool_ops *ops;
const char *platform_ops;
const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
- dev_ops->rx_queue_release(eth_dev->data->rx_queues[qid]);
+ dev_ops->rx_queue_release(eth_dev, qid);
eth_dev->data->rx_queues[qid] = NULL;
}
+ /* Clamp up cq limit to size of packet pool aura for LBK
+ * to avoid meta packet drop as LBK does not currently support
+ * backpressure.
+ */
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
+ uint64_t pkt_pool_limit = roc_nix_inl_dev_rq_limit_get();
+
+ /* Use current RQ's aura limit if inl rq is not available */
+ if (!pkt_pool_limit)
+ pkt_pool_limit = roc_npa_aura_op_limit_get(mp->pool_id);
+ nb_desc = RTE_MAX(nb_desc, pkt_pool_limit);
+ }
+
/* Setup ROC CQ */
cq = &dev->cqs[qid];
cq->qid = qid;
rq->later_skip = sizeof(struct rte_mbuf);
rq->lpb_size = mp->elt_size;
+ /* Enable Inline IPsec on RQ; it will not be used for poll mode */
+ if (roc_nix_inl_inb_is_enabled(nix))
+ rq->ipsech_ena = true;
+
rc = roc_nix_rq_init(&dev->nix, rq, !!eth_dev->data->dev_started);
if (rc) {
plt_err("Failed to init roc rq for rq=%d, rc=%d", qid, rc);
rxq_sp->dev = dev;
rxq_sp->qid = qid;
rxq_sp->qconf.conf.rx = *rx_conf;
+ /* Queue config should reflect global offloads */
+ rxq_sp->qconf.conf.rx.offloads = dev->rx_offloads;
rxq_sp->qconf.nb_desc = nb_desc;
rxq_sp->qconf.mp = mp;
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
+ /* Setup rq reference for inline dev if present */
+ rc = roc_nix_inl_dev_rq_get(rq);
+ if (rc)
+ goto free_mem;
+ }
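+ /* Note: the inline dev RQ reference taken above is dropped in
+ * cnxk_nix_rx_queue_release().
+ */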
+
plt_nix_dbg("rq=%d pool=%s nb_desc=%d->%d", qid, mp->name, nb_desc,
cq->nb_desc);
* These are needed in deriving raw clock value from tsc counter.
* read_clock eth op returns raw clock value.
*/
- if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
+ if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
rc = cnxk_nix_tsc_convert(dev);
if (rc) {
plt_err("Failed to calculate delta and freq mult");
}
return 0;
+free_mem:
+ plt_free(rxq_sp);
rq_fini:
rc |= roc_nix_rq_fini(rq);
cq_fini:
}
static void
-cnxk_nix_rx_queue_release(void *rxq)
+cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
{
+ void *rxq = eth_dev->data->rx_queues[qid];
struct cnxk_eth_rxq_sp *rxq_sp;
struct cnxk_eth_dev *dev;
struct roc_nix_rq *rq;
struct roc_nix_cq *cq;
- uint16_t qid;
int rc;
if (!rxq)
rxq_sp = cnxk_eth_rxq_to_sp(rxq);
dev = rxq_sp->dev;
- qid = rxq_sp->qid;
+ rq = &dev->rqs[qid];
plt_nix_dbg("Releasing rxq %u", qid);
+ /* Release rq reference for inline dev if present */
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
+ roc_nix_inl_dev_rq_put(rq);
+
/* Cleanup ROC RQ */
- rq = &dev->rqs[qid];
rc = roc_nix_rq_fini(rq);
if (rc)
plt_err("Failed to cleanup rq, rc=%d", rc);
dev->ethdev_rss_hf = ethdev_rss;
- if (ethdev_rss & ETH_RSS_L2_PAYLOAD &&
+ if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD &&
dev->npc.switch_header_type == ROC_PRIV_FLAGS_LEN_90B) {
flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
}
- if (ethdev_rss & ETH_RSS_C_VLAN)
+ if (ethdev_rss & RTE_ETH_RSS_C_VLAN)
flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
- if (ethdev_rss & ETH_RSS_L3_SRC_ONLY)
+ if (ethdev_rss & RTE_ETH_RSS_L3_SRC_ONLY)
flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
- if (ethdev_rss & ETH_RSS_L3_DST_ONLY)
+ if (ethdev_rss & RTE_ETH_RSS_L3_DST_ONLY)
flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
- if (ethdev_rss & ETH_RSS_L4_SRC_ONLY)
+ if (ethdev_rss & RTE_ETH_RSS_L4_SRC_ONLY)
flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
- if (ethdev_rss & ETH_RSS_L4_DST_ONLY)
+ if (ethdev_rss & RTE_ETH_RSS_L4_DST_ONLY)
flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
if (ethdev_rss & RSS_IPV4_ENABLE)
if (ethdev_rss & RSS_IPV6_ENABLE)
flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
- if (ethdev_rss & ETH_RSS_TCP)
+ if (ethdev_rss & RTE_ETH_RSS_TCP)
flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
- if (ethdev_rss & ETH_RSS_UDP)
+ if (ethdev_rss & RTE_ETH_RSS_UDP)
flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
- if (ethdev_rss & ETH_RSS_SCTP)
+ if (ethdev_rss & RTE_ETH_RSS_SCTP)
flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
- if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
+ if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD)
flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
if (ethdev_rss & RSS_IPV6_EX_ENABLE)
flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
- if (ethdev_rss & ETH_RSS_PORT)
+ if (ethdev_rss & RTE_ETH_RSS_PORT)
flowkey_cfg |= FLOW_KEY_TYPE_PORT;
- if (ethdev_rss & ETH_RSS_NVGRE)
+ if (ethdev_rss & RTE_ETH_RSS_NVGRE)
flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
- if (ethdev_rss & ETH_RSS_VXLAN)
+ if (ethdev_rss & RTE_ETH_RSS_VXLAN)
flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
- if (ethdev_rss & ETH_RSS_GENEVE)
+ if (ethdev_rss & RTE_ETH_RSS_GENEVE)
flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
- if (ethdev_rss & ETH_RSS_GTPU)
+ if (ethdev_rss & RTE_ETH_RSS_GTPU)
flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
return flowkey_cfg;
dev->sqs = NULL;
}
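+/* Initialize the lists backing the rte_mtr (ingress policer) ops:
+ * meter profiles, meter policies and meter nodes. Entries are freed
+ * in nix_meter_fini().
+ */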
+static int
+nix_ingress_policer_setup(struct cnxk_eth_dev *dev)
+{
+ TAILQ_INIT(&dev->mtr_profiles);
+ TAILQ_INIT(&dev->mtr_policy);
+ TAILQ_INIT(&dev->mtr);
+
+ return 0;
+}
+
static int
nix_rss_default_setup(struct cnxk_eth_dev *dev)
{
uint64_t rss_hf;
rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
- rss_hash_level = ETH_RSS_LEVEL(rss_hf);
+ rss_hash_level = RTE_ETH_RSS_LEVEL(rss_hf);
if (rss_hash_level)
rss_hash_level -= 1;
txq_sp = cnxk_eth_txq_to_sp(txq[i]);
memcpy(&tx_qconf[i], &txq_sp->qconf, sizeof(*tx_qconf));
tx_qconf[i].valid = true;
- dev_ops->tx_queue_release(txq[i]);
+ dev_ops->tx_queue_release(eth_dev, i);
eth_dev->data->tx_queues[i] = NULL;
}
rxq_sp = cnxk_eth_rxq_to_sp(rxq[i]);
memcpy(&rx_qconf[i], &rxq_sp->qconf, sizeof(*rx_qconf));
rx_qconf[i].valid = true;
- dev_ops->rx_queue_release(rxq[i]);
+ dev_ops->rx_queue_release(eth_dev, i);
eth_dev->data->rx_queues[i] = NULL;
}
struct cnxk_eth_qconf *tx_qconf = dev->tx_qconf;
struct cnxk_eth_qconf *rx_qconf = dev->rx_qconf;
int rc, i, nb_rxq, nb_txq;
- void **txq, **rxq;
nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);
&tx_qconf[i].conf.tx);
if (rc) {
plt_err("Failed to setup tx queue rc=%d", rc);
- txq = eth_dev->data->tx_queues;
for (i -= 1; i >= 0; i--)
- dev_ops->tx_queue_release(txq[i]);
+ dev_ops->tx_queue_release(eth_dev, i);
goto fail;
}
}
rx_qconf[i].mp);
if (rc) {
plt_err("Failed to setup rx queue rc=%d", rc);
- rxq = eth_dev->data->rx_queues;
for (i -= 1; i >= 0; i--)
- dev_ops->rx_queue_release(rxq[i]);
+ dev_ops->rx_queue_release(eth_dev, i);
goto tx_queue_release;
}
}
return 0;
tx_queue_release:
- txq = eth_dev->data->tx_queues;
for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
- dev_ops->tx_queue_release(txq[i]);
+ dev_ops->tx_queue_release(eth_dev, i);
fail:
if (tx_qconf)
free(tx_qconf);
/* Nothing much to do if offload is not enabled */
if (!(dev->tx_offloads &
- (DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO)))
+ (RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)))
return 0;
/* Setup LSO formats in AF. It's a no-op if other ethdev has
goto fail_configure;
}
- if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
- rxmode->mq_mode != ETH_MQ_RX_RSS) {
+ if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+ rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
plt_err("Unsupported mq rx mode %d", rxmode->mq_mode);
goto fail_configure;
}
- if (txmode->mq_mode != ETH_MQ_TX_NONE) {
+ if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
plt_err("Unsupported mq tx mode %d", txmode->mq_mode);
goto fail_configure;
}
rc = nix_store_queue_cfg_and_then_release(eth_dev);
if (rc)
goto fail_configure;
+
+ /* Disable and free rte_meter entries */
+ rc = nix_meter_fini(dev);
+ if (rc)
+ goto fail_configure;
+
+ /* Cleanup security support */
+ rc = nix_security_release(dev);
+ if (rc)
+ goto fail_configure;
+
roc_nix_tm_fini(nix);
roc_nix_lf_free(nix);
}
/* Prepare rx cfg */
rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
if (dev->rx_offloads &
- (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {
+ (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_OL4;
rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_IL4;
}
ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
+ rx_cfg |= ROC_NIX_LF_RX_CFG_IP6_UDP_OPT;
+ /* Disable drop re if rx offload security is enabled and
+ * platform does not support it.
+ */
+ if (dev->ipsecd_drop_re_dis)
+ rx_cfg &= ~(ROC_NIX_LF_RX_CFG_DROP_RE);
+ }
+
nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
nb_txq = RTE_MAX(data->nb_tx_queues, 1);
goto free_nix_lf;
}
+ rc = nix_ingress_policer_setup(dev);
+ if (rc) {
+ plt_err("Failed to setup ingress policer rc=%d", rc);
+ goto free_nix_lf;
+ }
+
rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_DEFAULT, false);
if (rc) {
plt_err("Failed to enable default tm hierarchy, rc=%d", rc);
plt_err("Failed to initialize flow control rc=%d", rc);
goto cq_fini;
}
+
+ /* Setup Inline security support */
+ rc = nix_security_setup(dev);
+ if (rc)
+ goto cq_fini;
+
/*
* Restore queue config when reconfigure followed by
* reconfigure and no queue configure invoked from application case.
if (dev->configured == 1) {
rc = nix_restore_queue_cfg(eth_dev);
if (rc)
- goto cq_fini;
+ goto sec_release;
}
/* Update the mac address */
dev->nb_txq = data->nb_tx_queues;
return 0;
+sec_release:
+ rc |= nix_security_release(dev);
cq_fini:
roc_nix_unregister_cq_irqs(nix);
q_irq_fini:
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
struct rte_mbuf *rx_pkts[32];
+ struct rte_eth_link link;
int count, i, j, rc;
void *rxq;
for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
dev_ops->tx_queue_stop(eth_dev, i);
+ /* Bring down link status internally */
+ memset(&link, 0, sizeof(link));
+ rte_eth_linkstatus_set(eth_dev, &link);
+
return 0;
}
* enabled on PF owning this VF
*/
memset(&dev->tstamp, 0, sizeof(struct cnxk_timesync_info));
- if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
+ if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
cnxk_eth_dev_ops.timesync_enable(eth_dev);
else
cnxk_eth_dev_ops.timesync_disable(eth_dev);
- if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
rc = rte_mbuf_dyn_rx_timestamp_register
(&dev->tstamp.tstamp_dynfield_offset,
&dev->tstamp.rx_tstamp_dynflag);
.timesync_read_time = cnxk_nix_timesync_read_time,
.timesync_write_time = cnxk_nix_timesync_write_time,
.timesync_adjust_time = cnxk_nix_timesync_adjust_time,
+ .read_clock = cnxk_nix_read_clock,
+ .reta_update = cnxk_nix_reta_update,
+ .reta_query = cnxk_nix_reta_query,
+ .rss_hash_update = cnxk_nix_rss_hash_update,
+ .rss_hash_conf_get = cnxk_nix_rss_hash_conf_get,
+ .set_mc_addr_list = cnxk_nix_mc_addr_list_configure,
+ .set_queue_rate_limit = cnxk_nix_tm_set_queue_rate_limit,
+ .tm_ops_get = cnxk_nix_tm_ops_get,
+ .mtr_ops_get = cnxk_nix_mtr_ops_get,
};
static int
cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct rte_security_ctx *sec_ctx;
struct roc_nix *nix = &dev->nix;
struct rte_pci_device *pci_dev;
int rc, max_entries;
eth_dev->dev_ops = &cnxk_eth_dev_ops;
+ /* Alloc security context */
+ sec_ctx = plt_zmalloc(sizeof(struct rte_security_ctx), 0);
+ if (!sec_ctx)
+ return -ENOMEM;
+ sec_ctx->device = eth_dev;
+ sec_ctx->ops = &cnxk_eth_sec_ops;
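+ /* Fast-path flags: rte_security_set_pkt_metadata() and
+ * rte_security_get_userdata() can skip the driver callbacks.
+ */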
+ sec_ctx->flags =
+ (RTE_SEC_CTX_F_FAST_SET_MDATA | RTE_SEC_CTX_F_FAST_GET_UDATA);
+ eth_dev->security_ctx = sec_ctx;
+ TAILQ_INIT(&dev->inb.list);
+ TAILQ_INIT(&dev->outb.list);
+
/* For secondary processes, the primary has done all the work */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
/* Initialize base roc nix */
nix->pci_dev = pci_dev;
+ nix->hw_vlan_ins = true;
rc = roc_nix_dev_init(nix);
if (rc) {
plt_err("Failed to initialize roc nix rc=%d", rc);
/* Register up msg callbacks */
roc_nix_mac_link_cb_register(nix, cnxk_eth_dev_link_status_cb);
+ /* Register link info get callback */
+ roc_nix_mac_link_info_get_cb_register(nix,
+ cnxk_eth_dev_link_status_get_cb);
+
dev->eth_dev = eth_dev;
dev->configured = 0;
dev->ptype_disable = 0;
}
dev->max_mac_entries = max_entries;
+ dev->dmac_filter_count = 1;
/* Get mac address */
rc = roc_nix_npc_mac_addr_get(nix, dev->mac_addr);
struct roc_nix *nix = &dev->nix;
int rc, i;
+ plt_free(eth_dev->security_ctx);
+ eth_dev->security_ctx = NULL;
+
/* Nothing to be done for secondary processes */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
roc_nix_npc_rx_ena_dis(nix, false);
+ /* Disable and free rte_meter entries */
+ nix_meter_fini(dev);
+
/* Disable and free rte_flow entries */
roc_npc_fini(&dev->npc);
/* Disable link status events */
roc_nix_mac_link_event_start_stop(nix, false);
+ /* Unregister the link update op; this is required to stop VFs from
+ * receiving link status updates on the exit path.
+ */
+ roc_nix_mac_link_cb_unregister(nix);
+
/* Free up SQs */
for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
- dev_ops->tx_queue_release(eth_dev->data->tx_queues[i]);
+ dev_ops->tx_queue_release(eth_dev, i);
eth_dev->data->tx_queues[i] = NULL;
}
eth_dev->data->nb_tx_queues = 0;
/* Free up RQ's and CQ's */
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
- dev_ops->rx_queue_release(eth_dev->data->rx_queues[i]);
+ dev_ops->rx_queue_release(eth_dev, i);
eth_dev->data->rx_queues[i] = NULL;
}
eth_dev->data->nb_rx_queues = 0;
+ /* Free security resources */
+ nix_security_release(dev);
+
/* Free tm resources */
roc_nix_tm_fini(nix);