diff --git a/drivers/net/octeontx2/otx2_ethdev.c b/drivers/net/octeontx2/otx2_ethdev.c
index fcb1869d58..b84128fefb 100644
--- a/drivers/net/octeontx2/otx2_ethdev.c
+++ b/drivers/net/octeontx2/otx2_ethdev.c
@@ -3,7 +3,6 @@
  */

 #include
-#include

 #include
 #include
@@ -63,6 +62,7 @@ nix_lf_alloc(struct otx2_eth_dev *dev, uint32_t nb_rxq, uint32_t nb_txq)
 		req->rx_cfg |= BIT_ULL(37 /* CSUM_OL4 */);
 		req->rx_cfg |= BIT_ULL(36 /* CSUM_IL4 */);
 	}
+	req->rx_cfg |= BIT_ULL(32 /* DROP_RE */);

 	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
 	if (rc)
@@ -267,26 +267,31 @@ nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
 	aq->cq.cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);
 	aq->cq.cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);

-	/* TX pause frames enable flowctrl on RX side */
-	if (dev->fc_info.tx_pause) {
-		/* Single bpid is allocated for all rx channels for now */
-		aq->cq.bpid = dev->fc_info.bpid[0];
-		aq->cq.bp = NIX_CQ_BP_LEVEL;
-		aq->cq.bp_ena = 1;
-	}
-
 	/* Many to one reduction */
 	aq->cq.qint_idx = qid % dev->qints;
 	/* Map CQ0 [RQ0] to CINT0 and so on till max 64 irqs */
 	aq->cq.cint_idx = qid;

 	if (otx2_ethdev_fixup_is_limit_cq_full(dev)) {
+		const float rx_cq_skid = NIX_CQ_FULL_ERRATA_SKID;
 		uint16_t min_rx_drop;
-		const float rx_cq_skid = 1024 * 256;

 		min_rx_drop = ceil(rx_cq_skid / (float)cq_size);
 		aq->cq.drop = min_rx_drop;
 		aq->cq.drop_ena = 1;
+		rxq->cq_drop = min_rx_drop;
+	} else {
+		rxq->cq_drop = NIX_CQ_THRESH_LEVEL;
+		aq->cq.drop = rxq->cq_drop;
+		aq->cq.drop_ena = 1;
+	}
+
+	/* TX pause frames enable flowctrl on RX side */
+	if (dev->fc_info.tx_pause) {
+		/* Single bpid is allocated for all rx channels for now */
+		aq->cq.bpid = dev->fc_info.bpid[0];
+		aq->cq.bp = rxq->cq_drop;
+		aq->cq.bp_ena = 1;
 	}

 	rc = otx2_mbox_process(mbox);
@@ -325,8 +330,7 @@ nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
 	/* Many to one reduction */
 	aq->rq.qint_idx = qid % dev->qints;

-	if (otx2_ethdev_fixup_is_limit_cq_full(dev))
-		aq->rq.xqe_drop_ena = 1;
+	aq->rq.xqe_drop_ena = 1;

 	rc = otx2_mbox_process(mbox);
 	if (rc) {
@@ -521,6 +525,20 @@ otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
 	eth_dev->data->rx_queues[rq] = rxq;
 	eth_dev->data->rx_queue_state[rq] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	/* Calculating delta and freq mult between PTP HI clock and tsc.
+	 * These are needed in deriving raw clock value from tsc counter.
+	 * read_clock eth op returns raw clock value.
+	 */
+	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
+	    otx2_ethdev_is_ptp_en(dev)) {
+		rc = otx2_nix_raw_clock_tsc_conv(dev);
+		if (rc) {
+			otx2_err("Failed to calculate delta and freq mult");
+			goto fail;
+		}
+	}
+
 	return 0;

 free_rxq:
@@ -586,6 +604,24 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	RTE_BUILD_BUG_ON(PKT_TX_TCP_CKSUM != (1ULL << 52));
 	RTE_BUILD_BUG_ON(PKT_TX_SCTP_CKSUM != (2ULL << 52));
 	RTE_BUILD_BUG_ON(PKT_TX_UDP_CKSUM != (3ULL << 52));
+	RTE_BUILD_BUG_ON(PKT_TX_IP_CKSUM != (1ULL << 54));
+	RTE_BUILD_BUG_ON(PKT_TX_IPV4 != (1ULL << 55));
+	RTE_BUILD_BUG_ON(PKT_TX_OUTER_IP_CKSUM != (1ULL << 58));
+	RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV4 != (1ULL << 59));
+	RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV6 != (1ULL << 60));
+	RTE_BUILD_BUG_ON(PKT_TX_OUTER_UDP_CKSUM != (1ULL << 41));
+	RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
+	RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
+	RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
+	RTE_BUILD_BUG_ON(RTE_MBUF_OUTL3_LEN_BITS != 9);
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
+			 offsetof(struct rte_mbuf, buf_iova) + 8);
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
+			 offsetof(struct rte_mbuf, buf_iova) + 16);
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+			 offsetof(struct rte_mbuf, ol_flags) + 12);
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
+			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));

 	if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
 	    conf & DEV_TX_OFFLOAD_QINQ_INSERT)
@@ -607,6 +643,9 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
 		flags |= NIX_TX_MULTI_SEG_F;

+	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+		flags |= NIX_TX_OFFLOAD_TSTAMP_F;
+
 	return flags;
 }

@@ -715,7 +754,8 @@ nix_sq_uninit(struct otx2_eth_txq *txq)
 	while (count) {
 		void *next_sqb;

-		next_sqb = *(void **)((uintptr_t)sqb_buf + ((sqes_per_sqb - 1) *
+		next_sqb = *(void **)((uintptr_t)sqb_buf + (uint32_t)
+			   ((sqes_per_sqb - 1) *
 			   nix_sq_max_sqe_sz(txq)));
 		npa_lf_aura_op_free(txq->sqb_pool->pool_id, 1,
 			(uint64_t)sqb_buf);
@@ -776,7 +816,7 @@ nix_alloc_sqb_pool(int port, struct otx2_eth_txq *txq, uint16_t nb_desc)
 	nb_sqb_bufs = nb_desc / sqes_per_sqb;

 	/* Clamp up to devarg passed SQB count */
-	nb_sqb_bufs = RTE_MIN(dev->max_sqb_count, RTE_MAX(NIX_MIN_SQB,
+	nb_sqb_bufs = RTE_MIN(dev->max_sqb_count, RTE_MAX(NIX_DEF_SQB,
 			      nb_sqb_bufs + NIX_SQB_LIST_SPACE));

 	txq->sqb_pool = rte_mempool_create_empty(name, NIX_MAX_SQB, blk_sz,
@@ -853,8 +893,6 @@ otx2_nix_form_default_desc(struct otx2_eth_txq *txq)
 		send_mem = (struct nix_send_mem_s *)(txq->cmd +
 				(send_hdr->w0.sizem1 << 1));
 		send_mem->subdc = NIX_SUBDC_MEM;
-		send_mem->dsz = 0x0;
-		send_mem->wmem = 0x1;
 		send_mem->alg = NIX_SENDMEMALG_SETTSTMP;
 		send_mem->addr = txq->dev->tstamp.tx_tstamp_iova;
 	}
@@ -1185,38 +1223,41 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
 	/* Sanity checks */
 	if (rte_eal_has_hugepages() == 0) {
 		otx2_err("Huge page is not configured");
-		goto fail;
-	}
-
-	if (rte_eal_iova_mode() != RTE_IOVA_VA) {
-		otx2_err("iova mode should be va");
-		goto fail;
+		goto fail_configure;
 	}

 	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
 		otx2_err("Setting link speed/duplex not supported");
-		goto fail;
+		goto fail_configure;
 	}

 	if (conf->dcb_capability_en == 1) {
 		otx2_err("dcb enable is not supported");
-		goto fail;
+		goto fail_configure;
 	}

 	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
 		otx2_err("Flow director is not supported");
-		goto fail;
+		goto fail_configure;
 	}

 	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
 	    rxmode->mq_mode != ETH_MQ_RX_RSS) {
 		otx2_err("Unsupported mq rx mode %d", rxmode->mq_mode);
-		goto fail;
+		goto fail_configure;
 	}

 	if (txmode->mq_mode != ETH_MQ_TX_NONE) {
 		otx2_err("Unsupported mq tx mode %d", txmode->mq_mode);
-		goto fail;
+		goto fail_configure;
+	}
+
+	if (otx2_dev_is_Ax(dev) &&
+	    (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
+	    ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+	    (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
+		otx2_err("Outer IP and SCTP checksum unsupported");
+		goto fail_configure;
 	}

 	/* Free the resources allocated from the previous configure */
@@ -1230,20 +1271,11 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
 		nix_set_nop_rxtx_function(eth_dev);
 		rc = nix_store_queue_cfg_and_then_release(eth_dev);
 		if (rc)
-			goto fail;
+			goto fail_configure;
 		otx2_nix_tm_fini(eth_dev);
 		nix_lf_free(dev);
 	}

-	if (otx2_dev_is_Ax(dev) &&
-	    (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
-	    ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
-	    (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
-		otx2_err("Outer IP and SCTP checksum unsupported");
-		rc = -EINVAL;
-		goto fail;
-	}
-
 	dev->rx_offloads = rxmode->offloads;
 	dev->tx_offloads = txmode->offloads;
 	dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
@@ -1257,7 +1289,7 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
 	rc = nix_lf_alloc(dev, nb_rxq, nb_txq);
 	if (rc) {
 		otx2_err("Failed to init nix_lf rc=%d", rc);
-		goto fail;
+		goto fail_offloads;
 	}

 	/* Configure RSS */
@@ -1277,14 +1309,14 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
 	rc = otx2_nix_vlan_offload_init(eth_dev);
 	if (rc) {
 		otx2_err("Failed to init vlan offload rc=%d", rc);
-		goto free_nix_lf;
+		goto tm_fini;
 	}

 	/* Register queue IRQs */
 	rc = oxt2_nix_register_queue_irqs(eth_dev);
 	if (rc) {
 		otx2_err("Failed to register queue interrupts rc=%d", rc);
-		goto free_nix_lf;
+		goto vlan_fini;
 	}

 	/* Register cq IRQs */
@@ -1292,7 +1324,7 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
 	if (eth_dev->data->nb_rx_queues > dev->cints) {
 		otx2_err("Rx interrupt cannot be enabled, rxq > %d",
 			 dev->cints);
-		goto free_nix_lf;
+		goto q_irq_fini;
 	}
 	/* Rx interrupt feature cannot work with vector mode because,
 	 * vector mode doesn't process packets unless min 4 pkts are
@@ -1304,7 +1336,7 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
 		rc = oxt2_nix_register_cq_irqs(eth_dev);
 		if (rc) {
 			otx2_err("Failed to register CQ interrupts rc=%d", rc);
-			goto free_nix_lf;
+			goto q_irq_fini;
 		}
 	}

@@ -1312,25 +1344,15 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
 	rc = cgx_intlbk_enable(dev, eth_dev->data->dev_conf.lpbk_mode);
 	if (rc) {
 		otx2_err("Failed to configure cgx loop back mode rc=%d", rc);
-		goto free_nix_lf;
+		goto q_irq_fini;
 	}

 	rc = otx2_nix_rxchan_bpid_cfg(eth_dev, true);
 	if (rc) {
 		otx2_err("Failed to configure nix rx chan bpid cfg rc=%d", rc);
-		goto free_nix_lf;
+		goto q_irq_fini;
 	}

-	/* Enable PTP if it was requested by the app or if it is already
-	 * enabled in PF owning this VF
-	 */
-	memset(&dev->tstamp, 0, sizeof(struct otx2_timesync_info));
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
-	    otx2_ethdev_is_ptp_en(dev))
-		otx2_nix_timesync_enable(eth_dev);
-	else
-		otx2_nix_timesync_disable(eth_dev);
-
 	/*
 	 * Restore queue config when reconfigure followed by
 	 * reconfigure and no queue configure invoked from application case.
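
Note: the nix_tx_offload_flags() hunk further up hard-wires the rte_mbuf field offsets and PKT_TX_* bit positions that the driver's handwritten Tx descriptor code depends on; RTE_BUILD_BUG_ON() turns any future layout change into a compile error instead of silent corruption. A minimal self-contained sketch of the same technique using C11 _Static_assert (toy_mbuf is a made-up stand-in, not the real rte_mbuf):

	#include <stddef.h>
	#include <stdint.h>

	/* Stand-in struct: buf_addr at offset 0, buf_iova at 8, data_off at 16 */
	struct toy_mbuf {
		void *buf_addr;
		uint64_t buf_iova;
		uint16_t data_off;
	};

	/* Fails to compile if a field is ever reordered or resized,
	 * mirroring RTE_BUILD_BUG_ON(offsetof(...) != offsetof(...) + 8) */
	_Static_assert(offsetof(struct toy_mbuf, data_off) ==
		       offsetof(struct toy_mbuf, buf_iova) + 8,
		       "data_off must sit 8 bytes after buf_iova");
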
@@ -1338,7 +1360,7 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
 	if (dev->configured == 1) {
 		rc = nix_restore_queue_cfg(eth_dev);
 		if (rc)
-			goto free_nix_lf;
+			goto cq_fini;
 	}

 	/* Update the mac address */
@@ -1362,9 +1384,21 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
 	dev->configured_nb_tx_qs = data->nb_tx_queues;
 	return 0;

+cq_fini:
+	oxt2_nix_unregister_cq_irqs(eth_dev);
+q_irq_fini:
+	oxt2_nix_unregister_queue_irqs(eth_dev);
+vlan_fini:
+	otx2_nix_vlan_fini(eth_dev);
+tm_fini:
+	otx2_nix_tm_fini(eth_dev);
 free_nix_lf:
-	rc = nix_lf_free(dev);
-fail:
+	nix_lf_free(dev);
+fail_offloads:
+	dev->rx_offload_flags &= ~nix_rx_offload_flags(eth_dev);
+	dev->tx_offload_flags &= ~nix_tx_offload_flags(eth_dev);
+fail_configure:
+	dev->configured = 0;
 	return rc;
 }

@@ -1527,6 +1561,16 @@ otx2_nix_dev_start(struct rte_eth_dev *eth_dev)
 		return rc;
 	}

+	/* Enable PTP if it was requested by the app or if it is already
+	 * enabled in PF owning this VF
+	 */
+	memset(&dev->tstamp, 0, sizeof(struct otx2_timesync_info));
+	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
+	    otx2_ethdev_is_ptp_en(dev))
+		otx2_nix_timesync_enable(eth_dev);
+	else
+		otx2_nix_timesync_disable(eth_dev);
+
 	rc = npc_rx_enable(dev);
 	if (rc) {
 		otx2_err("Failed to enable NPC rx %d", rc);
@@ -1624,6 +1668,7 @@ static const struct eth_dev_ops otx2_eth_dev_ops = {
 	.vlan_pvid_set = otx2_nix_vlan_pvid_set,
 	.rx_queue_intr_enable = otx2_nix_rx_queue_intr_enable,
 	.rx_queue_intr_disable = otx2_nix_rx_queue_intr_disable,
+	.read_clock = otx2_nix_read_clock,
 };

 static inline int
@@ -1787,7 +1832,8 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
 	dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
 	dev->rx_offload_capa = nix_get_rx_offload_capa(dev);

-	if (otx2_dev_is_Ax(dev)) {
+	if (otx2_dev_is_96xx_A0(dev) ||
+	    otx2_dev_is_95xx_Ax(dev)) {
 		dev->hwcap |= OTX2_FIXUP_F_MIN_4K_Q;
 		dev->hwcap |= OTX2_FIXUP_F_LIMIT_CQ_FULL;
 	}
@@ -2006,7 +2052,7 @@ static const struct rte_pci_id pci_nix_map[] = {

 static struct rte_pci_driver pci_nix = {
 	.id_table = pci_nix_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA |
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA |
 			RTE_PCI_DRV_INTR_LSC,
 	.probe = nix_probe,
 	.remove = nix_remove,
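
Note: the cq_fini/q_irq_fini/vlan_fini/tm_fini/free_nix_lf/fail_offloads/fail_configure chain added to otx2_nix_configure() above replaces the old catch-all fail label with staged unwinding: each failure jumps to the label that releases exactly the resources acquired so far, and the labels fall through in reverse acquisition order. It also stops clobbering the error code (the old free_nix_lf did rc = nix_lf_free(dev)). A minimal sketch of the idiom with hypothetical helpers (lf_alloc, irqs_register, bpid_cfg are illustrative, not the driver's functions):

	#include <stdio.h>

	/* Hypothetical acquire/release pairs standing in for
	 * nix_lf_alloc()/nix_lf_free(), IRQ register/unregister, etc. */
	static int  lf_alloc(void)        { return 0; }
	static void lf_free(void)         { }
	static int  irqs_register(void)   { return 0; }
	static void irqs_unregister(void) { }
	static int  bpid_cfg(void)        { return -1; } /* simulate a late failure */

	static int configure(void)
	{
		int rc;

		rc = lf_alloc();
		if (rc)
			goto fail;
		rc = irqs_register();
		if (rc)
			goto lf_fini;
		rc = bpid_cfg();
		if (rc)
			goto irq_fini;
		return 0;

		/* Unwind in reverse order; each label falls through to the next */
	irq_fini:
		irqs_unregister();
	lf_fini:
		lf_free();
	fail:
		return rc; /* original error code is preserved */
	}

	int main(void)
	{
		printf("configure() rc=%d\n", configure());
		return 0;
	}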