X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fcnxk%2Fcnxk_ethdev.c;h=db54468dbca1f875bfe1c4b7519384e84dc05654;hb=295968d1740760337e16b0d7914875c5cac52850;hp=ec00e620ebed46c25b1dadedde87a3d31c105e61;hpb=137fbfc6b6711bddc05c32324db972801aad7366;p=dpdk.git diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c index ec00e620eb..db54468dbc 100644 --- a/drivers/net/cnxk/cnxk_ethdev.c +++ b/drivers/net/cnxk/cnxk_ethdev.c @@ -10,7 +10,7 @@ nix_get_rx_offload_capa(struct cnxk_eth_dev *dev) if (roc_nix_is_vf_or_sdp(&dev->nix) || dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG) - capa &= ~DEV_RX_OFFLOAD_TIMESTAMP; + capa &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP; return capa; } @@ -28,11 +28,11 @@ nix_get_speed_capa(struct cnxk_eth_dev *dev) uint32_t speed_capa; /* Auto negotiation disabled */ - speed_capa = ETH_LINK_SPEED_FIXED; + speed_capa = RTE_ETH_LINK_SPEED_FIXED; if (!roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) { - speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G | - ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G | - ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G; + speed_capa |= RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G | + RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G | + RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G; } return speed_capa; @@ -65,7 +65,7 @@ nix_security_setup(struct cnxk_eth_dev *dev) struct roc_nix *nix = &dev->nix; int i, rc = 0; - if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) { + if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) { /* Setup Inline Inbound */ rc = roc_nix_inl_inb_init(nix); if (rc) { @@ -80,8 +80,8 @@ nix_security_setup(struct cnxk_eth_dev *dev) cnxk_nix_inb_mode_set(dev, true); } - if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY || - dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) { + if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY || + dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) { struct plt_bitmap *bmap; size_t bmap_sz; void *mem; @@ -100,8 +100,8 @@ nix_security_setup(struct cnxk_eth_dev *dev) dev->outb.lf_base = roc_nix_inl_outb_lf_base_get(nix); - /* Skip the rest if DEV_TX_OFFLOAD_SECURITY is not enabled */ - if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY)) + /* Skip the rest if RTE_ETH_TX_OFFLOAD_SECURITY is not enabled */ + if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)) goto done; rc = -ENOMEM; @@ -136,11 +136,43 @@ nix_security_setup(struct cnxk_eth_dev *dev) done: return 0; cleanup: - if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) + if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) rc |= roc_nix_inl_inb_fini(nix); return rc; } +static int +nix_meter_fini(struct cnxk_eth_dev *dev) +{ + struct cnxk_meter_node *next_mtr = NULL; + struct roc_nix_bpf_objs profs = {0}; + struct cnxk_meter_node *mtr = NULL; + struct cnxk_mtr *fms = &dev->mtr; + struct roc_nix *nix = &dev->nix; + struct roc_nix_rq *rq; + uint32_t i; + int rc; + + RTE_TAILQ_FOREACH_SAFE(mtr, fms, next, next_mtr) { + for (i = 0; i < mtr->rq_num; i++) { + rq = &dev->rqs[mtr->rq_id[i]]; + rc |= roc_nix_bpf_ena_dis(nix, mtr->bpf_id, rq, false); + } + + profs.level = mtr->level; + profs.count = 1; + profs.ids[0] = mtr->bpf_id; + rc = roc_nix_bpf_free(nix, &profs, 1); + + if (rc) + return rc; + + TAILQ_REMOVE(fms, mtr, next); + plt_free(mtr); + } + return 0; +} + static int nix_security_release(struct cnxk_eth_dev *dev) { @@ -150,7 +182,7 @@ nix_security_release(struct cnxk_eth_dev *dev) int rc, ret = 0; /* Cleanup Inline inbound */ - if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) { + if (dev->rx_offloads & 
RTE_ETH_RX_OFFLOAD_SECURITY) { /* Destroy inbound sessions */ tvar = NULL; RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->inb.list, entry, tvar) @@ -167,8 +199,8 @@ nix_security_release(struct cnxk_eth_dev *dev) } /* Cleanup Inline outbound */ - if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY || - dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) { + if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY || + dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) { /* Destroy outbound sessions */ tvar = NULL; RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->outb.list, entry, tvar) @@ -209,9 +241,9 @@ nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq) mbp_priv = rte_mempool_get_priv(rxq->qconf.mp); buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM; - if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buffsz) { - dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER; - dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS; + if (eth_dev->data->mtu + (uint32_t)CNXK_NIX_L2_OVERHEAD > buffsz) { + dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER; + dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS; } } @@ -220,18 +252,13 @@ nix_recalc_mtu(struct rte_eth_dev *eth_dev) { struct rte_eth_dev_data *data = eth_dev->data; struct cnxk_eth_rxq_sp *rxq; - uint16_t mtu; int rc; rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[0]) - 1; /* Setup scatter mode if needed by jumbo */ nix_enable_mseg_on_jumbo(rxq); - /* Setup MTU based on max_rx_pkt_len */ - mtu = data->dev_conf.rxmode.max_rx_pkt_len - CNXK_NIX_L2_OVERHEAD + - CNXK_NIX_MAX_VTAG_ACT_SIZE; - - rc = cnxk_nix_mtu_set(eth_dev, mtu); + rc = cnxk_nix_mtu_set(eth_dev, data->mtu); if (rc) plt_err("Failed to set default MTU size, rc=%d", rc); @@ -246,7 +273,7 @@ nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev) struct rte_eth_fc_conf fc_conf = {0}; int rc; - /* Both Rx & Tx flow ctrl get enabled(RTE_FC_FULL) in HW + /* Both Rx & Tx flow ctrl get enabled(RTE_ETH_FC_FULL) in HW * by AF driver, update those info in PMD structure. */ rc = cnxk_nix_flow_ctrl_get(eth_dev, &fc_conf); @@ -254,10 +281,10 @@ nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev) goto exit; fc->mode = fc_conf.mode; - fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) || - (fc_conf.mode == RTE_FC_RX_PAUSE); - fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) || - (fc_conf.mode == RTE_FC_TX_PAUSE); + fc->rx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) || + (fc_conf.mode == RTE_ETH_FC_RX_PAUSE); + fc->tx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) || + (fc_conf.mode == RTE_ETH_FC_TX_PAUSE); exit: return rc; @@ -278,11 +305,11 @@ nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev) /* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */ if (roc_model_is_cn96_ax() && dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG && - (fc_cfg.mode == RTE_FC_FULL || fc_cfg.mode == RTE_FC_RX_PAUSE)) { + (fc_cfg.mode == RTE_ETH_FC_FULL || fc_cfg.mode == RTE_ETH_FC_RX_PAUSE)) { fc_cfg.mode = - (fc_cfg.mode == RTE_FC_FULL || - fc_cfg.mode == RTE_FC_TX_PAUSE) ? - RTE_FC_TX_PAUSE : RTE_FC_NONE; + (fc_cfg.mode == RTE_ETH_FC_FULL || + fc_cfg.mode == RTE_ETH_FC_TX_PAUSE) ? + RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE; } return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg); @@ -325,7 +352,7 @@ nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev) * Maximum three segments can be supported with W8, Choose * NIX_MAXSQESZ_W16 for multi segment offload. 
*/ - if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) + if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) return NIX_MAXSQESZ_W16; else return NIX_MAXSQESZ_W8; @@ -346,14 +373,14 @@ cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, /* Free memory prior to re-allocation if needed. */ if (eth_dev->data->tx_queues[qid] != NULL) { plt_nix_dbg("Freeing memory prior to re-allocation %d", qid); - dev_ops->tx_queue_release(eth_dev->data->tx_queues[qid]); + dev_ops->tx_queue_release(eth_dev, qid); eth_dev->data->tx_queues[qid] = NULL; } /* When Tx Security offload is enabled, increase tx desc count by * max possible outbound desc count. */ - if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) + if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) nb_desc += dev->outb.nb_desc; /* Setup ROC SQ */ @@ -396,20 +423,20 @@ cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, } static void -cnxk_nix_tx_queue_release(void *txq) +cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid) { + void *txq = eth_dev->data->tx_queues[qid]; struct cnxk_eth_txq_sp *txq_sp; struct cnxk_eth_dev *dev; struct roc_nix_sq *sq; - uint16_t qid; int rc; if (!txq) return; txq_sp = cnxk_eth_txq_to_sp(txq); + dev = txq_sp->dev; - qid = txq_sp->qid; plt_nix_dbg("Releasing txq %u", qid); @@ -464,7 +491,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, const struct eth_dev_ops *dev_ops = eth_dev->dev_ops; plt_nix_dbg("Freeing memory prior to re-allocation %d", qid); - dev_ops->rx_queue_release(eth_dev->data->rx_queues[qid]); + dev_ops->rx_queue_release(eth_dev, qid); eth_dev->data->rx_queues[qid] = NULL; } @@ -472,7 +499,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, * to avoid meta packet drop as LBK does not currently support * backpressure. */ - if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) { + if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) { uint64_t pkt_pool_limit = roc_nix_inl_dev_rq_limit_get(); /* Use current RQ's aura limit if inl rq is not available */ @@ -534,7 +561,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, rxq_sp->qconf.nb_desc = nb_desc; rxq_sp->qconf.mp = mp; - if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) { + if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) { /* Setup rq reference for inline dev if present */ rc = roc_nix_inl_dev_rq_get(rq); if (rc) @@ -552,7 +579,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, * These are needed in deriving raw clock value from tsc counter. * read_clock eth op returns raw clock value. 
*/ - if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) { + if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) { rc = cnxk_nix_tsc_convert(dev); if (rc) { plt_err("Failed to calculate delta and freq mult"); @@ -572,13 +599,13 @@ fail: } static void -cnxk_nix_rx_queue_release(void *rxq) +cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid) { + void *rxq = eth_dev->data->rx_queues[qid]; struct cnxk_eth_rxq_sp *rxq_sp; struct cnxk_eth_dev *dev; struct roc_nix_rq *rq; struct roc_nix_cq *cq; - uint16_t qid; int rc; if (!rxq) @@ -586,13 +613,12 @@ cnxk_nix_rx_queue_release(void *rxq) rxq_sp = cnxk_eth_rxq_to_sp(rxq); dev = rxq_sp->dev; - qid = rxq_sp->qid; rq = &dev->rqs[qid]; plt_nix_dbg("Releasing rxq %u", qid); /* Release rq reference for inline dev if present */ - if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) + if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) roc_nix_inl_dev_rq_put(rq); /* Cleanup ROC RQ */ @@ -631,24 +657,24 @@ cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss, dev->ethdev_rss_hf = ethdev_rss; - if (ethdev_rss & ETH_RSS_L2_PAYLOAD && + if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD && dev->npc.switch_header_type == ROC_PRIV_FLAGS_LEN_90B) { flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B; } - if (ethdev_rss & ETH_RSS_C_VLAN) + if (ethdev_rss & RTE_ETH_RSS_C_VLAN) flowkey_cfg |= FLOW_KEY_TYPE_VLAN; - if (ethdev_rss & ETH_RSS_L3_SRC_ONLY) + if (ethdev_rss & RTE_ETH_RSS_L3_SRC_ONLY) flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC; - if (ethdev_rss & ETH_RSS_L3_DST_ONLY) + if (ethdev_rss & RTE_ETH_RSS_L3_DST_ONLY) flowkey_cfg |= FLOW_KEY_TYPE_L3_DST; - if (ethdev_rss & ETH_RSS_L4_SRC_ONLY) + if (ethdev_rss & RTE_ETH_RSS_L4_SRC_ONLY) flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC; - if (ethdev_rss & ETH_RSS_L4_DST_ONLY) + if (ethdev_rss & RTE_ETH_RSS_L4_DST_ONLY) flowkey_cfg |= FLOW_KEY_TYPE_L4_DST; if (ethdev_rss & RSS_IPV4_ENABLE) @@ -657,34 +683,34 @@ cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss, if (ethdev_rss & RSS_IPV6_ENABLE) flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX]; - if (ethdev_rss & ETH_RSS_TCP) + if (ethdev_rss & RTE_ETH_RSS_TCP) flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX]; - if (ethdev_rss & ETH_RSS_UDP) + if (ethdev_rss & RTE_ETH_RSS_UDP) flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX]; - if (ethdev_rss & ETH_RSS_SCTP) + if (ethdev_rss & RTE_ETH_RSS_SCTP) flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX]; - if (ethdev_rss & ETH_RSS_L2_PAYLOAD) + if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD) flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX]; if (ethdev_rss & RSS_IPV6_EX_ENABLE) flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT; - if (ethdev_rss & ETH_RSS_PORT) + if (ethdev_rss & RTE_ETH_RSS_PORT) flowkey_cfg |= FLOW_KEY_TYPE_PORT; - if (ethdev_rss & ETH_RSS_NVGRE) + if (ethdev_rss & RTE_ETH_RSS_NVGRE) flowkey_cfg |= FLOW_KEY_TYPE_NVGRE; - if (ethdev_rss & ETH_RSS_VXLAN) + if (ethdev_rss & RTE_ETH_RSS_VXLAN) flowkey_cfg |= FLOW_KEY_TYPE_VXLAN; - if (ethdev_rss & ETH_RSS_GENEVE) + if (ethdev_rss & RTE_ETH_RSS_GENEVE) flowkey_cfg |= FLOW_KEY_TYPE_GENEVE; - if (ethdev_rss & ETH_RSS_GTPU) + if (ethdev_rss & RTE_ETH_RSS_GTPU) flowkey_cfg |= FLOW_KEY_TYPE_GTPU; return flowkey_cfg; @@ -701,6 +727,16 @@ nix_free_queue_mem(struct cnxk_eth_dev *dev) dev->sqs = NULL; } +static int +nix_ingress_policer_setup(struct cnxk_eth_dev *dev) +{ + TAILQ_INIT(&dev->mtr_profiles); + TAILQ_INIT(&dev->mtr_policy); + TAILQ_INIT(&dev->mtr); + + return 0; +} + static int nix_rss_default_setup(struct 
cnxk_eth_dev *dev) { @@ -710,7 +746,7 @@ nix_rss_default_setup(struct cnxk_eth_dev *dev) uint64_t rss_hf; rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf; - rss_hash_level = ETH_RSS_LEVEL(rss_hf); + rss_hash_level = RTE_ETH_RSS_LEVEL(rss_hf); if (rss_hash_level) rss_hash_level -= 1; @@ -755,7 +791,7 @@ nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev) txq_sp = cnxk_eth_txq_to_sp(txq[i]); memcpy(&tx_qconf[i], &txq_sp->qconf, sizeof(*tx_qconf)); tx_qconf[i].valid = true; - dev_ops->tx_queue_release(txq[i]); + dev_ops->tx_queue_release(eth_dev, i); eth_dev->data->tx_queues[i] = NULL; } @@ -769,7 +805,7 @@ nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev) rxq_sp = cnxk_eth_rxq_to_sp(rxq[i]); memcpy(&rx_qconf[i], &rxq_sp->qconf, sizeof(*rx_qconf)); rx_qconf[i].valid = true; - dev_ops->rx_queue_release(rxq[i]); + dev_ops->rx_queue_release(eth_dev, i); eth_dev->data->rx_queues[i] = NULL; } @@ -791,7 +827,6 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev) struct cnxk_eth_qconf *tx_qconf = dev->tx_qconf; struct cnxk_eth_qconf *rx_qconf = dev->rx_qconf; int rc, i, nb_rxq, nb_txq; - void **txq, **rxq; nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues); nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues); @@ -826,9 +861,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev) &tx_qconf[i].conf.tx); if (rc) { plt_err("Failed to setup tx queue rc=%d", rc); - txq = eth_dev->data->tx_queues; for (i -= 1; i >= 0; i--) - dev_ops->tx_queue_release(txq[i]); + dev_ops->tx_queue_release(eth_dev, i); goto fail; } } @@ -844,9 +878,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev) rx_qconf[i].mp); if (rc) { plt_err("Failed to setup rx queue rc=%d", rc); - rxq = eth_dev->data->rx_queues; for (i -= 1; i >= 0; i--) - dev_ops->rx_queue_release(rxq[i]); + dev_ops->rx_queue_release(eth_dev, i); goto tx_queue_release; } } @@ -857,9 +890,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev) return 0; tx_queue_release: - txq = eth_dev->data->tx_queues; for (i = 0; i < eth_dev->data->nb_tx_queues; i++) - dev_ops->tx_queue_release(txq[i]); + dev_ops->tx_queue_release(eth_dev, i); fail: if (tx_qconf) free(tx_qconf); @@ -926,8 +958,8 @@ nix_lso_fmt_setup(struct cnxk_eth_dev *dev) /* Nothing much to do if offload is not enabled */ if (!(dev->tx_offloads & - (DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO | - DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO))) + (RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | + RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))) return 0; /* Setup LSO formats in AF. 
Its a no-op if other ethdev has @@ -975,13 +1007,13 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev) goto fail_configure; } - if (rxmode->mq_mode != ETH_MQ_RX_NONE && - rxmode->mq_mode != ETH_MQ_RX_RSS) { + if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE && + rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) { plt_err("Unsupported mq rx mode %d", rxmode->mq_mode); goto fail_configure; } - if (txmode->mq_mode != ETH_MQ_TX_NONE) { + if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) { plt_err("Unsupported mq tx mode %d", txmode->mq_mode); goto fail_configure; } @@ -1002,6 +1034,11 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev) if (rc) goto fail_configure; + /* Disable and free rte_meter entries */ + rc = nix_meter_fini(dev); + if (rc) + goto fail_configure; + /* Cleanup security support */ rc = nix_security_release(dev); if (rc) @@ -1017,7 +1054,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev) /* Prepare rx cfg */ rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD; if (dev->rx_offloads & - (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) { + (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) { rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_OL4; rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_IL4; } @@ -1025,7 +1062,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev) ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 | ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3); - if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) { + if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) { rx_cfg |= ROC_NIX_LF_RX_CFG_IP6_UDP_OPT; /* Disable drop re if rx offload security is enabled and * platform does not support it. @@ -1113,6 +1150,12 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev) goto free_nix_lf; } + rc = nix_ingress_policer_setup(dev); + if (rc) { + plt_err("Failed to setup ingress policer rc=%d", rc); + goto free_nix_lf; + } + rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_DEFAULT, false); if (rc) { plt_err("Failed to enable default tm hierarchy, rc=%d", rc); @@ -1411,12 +1454,12 @@ cnxk_nix_dev_start(struct rte_eth_dev *eth_dev) * enabled on PF owning this VF */ memset(&dev->tstamp, 0, sizeof(struct cnxk_timesync_info)); - if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) + if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) cnxk_eth_dev_ops.timesync_enable(eth_dev); else cnxk_eth_dev_ops.timesync_disable(eth_dev); - if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) { + if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) { rc = rte_mbuf_dyn_rx_timestamp_register (&dev->tstamp.tstamp_dynfield_offset, &dev->tstamp.rx_tstamp_dynflag); @@ -1498,6 +1541,7 @@ struct eth_dev_ops cnxk_eth_dev_ops = { .set_mc_addr_list = cnxk_nix_mc_addr_list_configure, .set_queue_rate_limit = cnxk_nix_tm_set_queue_rate_limit, .tm_ops_get = cnxk_nix_tm_ops_get, + .mtr_ops_get = cnxk_nix_mtr_ops_get, }; static int @@ -1651,6 +1695,9 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset) roc_nix_npc_rx_ena_dis(nix, false); + /* Disable and free rte_meter entries */ + nix_meter_fini(dev); + /* Disable and free rte_flow entries */ roc_npc_fini(&dev->npc); @@ -1664,14 +1711,14 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset) /* Free up SQs */ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { - dev_ops->tx_queue_release(eth_dev->data->tx_queues[i]); + dev_ops->tx_queue_release(eth_dev, i); eth_dev->data->tx_queues[i] = NULL; } eth_dev->data->nb_tx_queues = 0; /* Free up RQ's and CQ's */ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { - dev_ops->rx_queue_release(eth_dev->data->rx_queues[i]); + 
dev_ops->rx_queue_release(eth_dev, i); eth_dev->data->rx_queues[i] = NULL; } eth_dev->data->nb_rx_queues = 0;
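
Note (not part of the patch): most hunks above are a mechanical rename of the ethdev macros (DEV_RX_OFFLOAD_*, DEV_TX_OFFLOAD_*, ETH_RSS_*, ETH_LINK_SPEED_*, ETH_MQ_*, RTE_FC_*) to their RTE_ETH_-prefixed forms, plus the removal of rxmode.max_rx_pkt_len in favour of data->mtu, new rte_mtr (meter) hooks, and the switch of the Rx/Tx queue-release callbacks from taking a queue pointer to taking the device and queue id. The C sketch below only illustrates that last callback-signature change in a hypothetical driver; names prefixed "my_" are invented for the example and it assumes a driver build environment where the driver-side ethdev header is available.

/*
 * Illustrative sketch, not taken from the patch: the old callback
 * received only the queue pointer, so drivers cached the queue id in
 * their per-queue struct; the new callback (as used throughout the
 * diff above) receives the port and the queue id directly.
 */
#include <stdint.h>
#include <stdlib.h>
#include <ethdev_driver.h>	/* driver-side view of struct rte_eth_dev (assumption) */

struct my_rxq {
	uint16_t qid;	/* only needed with the old, pointer-only callback */
	void *hw_ring;
};

/* Old shape: void (*)(void *queue) */
static void
my_rx_queue_release_old(void *queue)
{
	struct my_rxq *rxq = queue;

	if (rxq == NULL)
		return;
	free(rxq->hw_ring);
	free(rxq);
}

/* New shape: void (*)(struct rte_eth_dev *, uint16_t).  The driver can
 * index eth_dev->data->rx_queues[] itself and clear the slot, which is
 * exactly the pattern the cnxk hunks adopt.
 */
static void
my_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
{
	struct my_rxq *rxq = eth_dev->data->rx_queues[qid];

	if (rxq == NULL)
		return;
	free(rxq->hw_ring);
	free(rxq);
	eth_dev->data->rx_queues[qid] = NULL;
}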