X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fcnxk%2Fcn9k_ethdev.c;h=d34bc6898f79534363cd1d69aa7be0feb5105ad2;hb=28968ad1d39260678e7e26a2901a396a7fb34351;hp=08c86f9e6b7b13497dc24f4e7158f1834f040281;hpb=7eabd6c637739a37f480053befeb4ab0407c43e2;p=dpdk.git

diff --git a/drivers/net/cnxk/cn9k_ethdev.c b/drivers/net/cnxk/cn9k_ethdev.c
index 08c86f9e6b..d34bc6898f 100644
--- a/drivers/net/cnxk/cn9k_ethdev.c
+++ b/drivers/net/cnxk/cn9k_ethdev.c
@@ -15,28 +15,28 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
 	uint16_t flags = 0;
 
-	if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
-	    (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+	if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
+	    (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
 		flags |= NIX_RX_OFFLOAD_RSS_F;
 
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+	    (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	    (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		flags |= NIX_RX_MULTI_SEG_F;
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 
 	if (!dev->ptype_disable)
 		flags |= NIX_RX_OFFLOAD_PTYPE_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 		flags |= NIX_RX_OFFLOAD_SECURITY_F;
 
 	return flags;
@@ -50,15 +50,15 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	uint16_t flags = 0;
 
 	/* Fastpath is dependent on these enums */
-	RTE_BUILD_BUG_ON(PKT_TX_TCP_CKSUM != (1ULL << 52));
-	RTE_BUILD_BUG_ON(PKT_TX_SCTP_CKSUM != (2ULL << 52));
-	RTE_BUILD_BUG_ON(PKT_TX_UDP_CKSUM != (3ULL << 52));
-	RTE_BUILD_BUG_ON(PKT_TX_IP_CKSUM != (1ULL << 54));
-	RTE_BUILD_BUG_ON(PKT_TX_IPV4 != (1ULL << 55));
-	RTE_BUILD_BUG_ON(PKT_TX_OUTER_IP_CKSUM != (1ULL << 58));
-	RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV4 != (1ULL << 59));
-	RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV6 != (1ULL << 60));
-	RTE_BUILD_BUG_ON(PKT_TX_OUTER_UDP_CKSUM != (1ULL << 41));
+	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_TCP_CKSUM != (1ULL << 52));
+	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_SCTP_CKSUM != (2ULL << 52));
+	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_UDP_CKSUM != (3ULL << 52));
+	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IP_CKSUM != (1ULL << 54));
+	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IPV4 != (1ULL << 55));
+	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IP_CKSUM != (1ULL << 58));
+	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV4 != (1ULL << 59));
+	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV6 != (1ULL << 60));
+	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_UDP_CKSUM != (1ULL << 41));
 	RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
 	RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
 	RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
@@ -72,39 +72,39 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
 			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
 
-	if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
-	    conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+	    conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
 
-	if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 
-	if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_UDP_CKSUM || conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM || conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
 
-	if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
 
-	if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		flags |= NIX_TX_MULTI_SEG_F;
 
 	/* Enable Inner checksum for TSO */
-	if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+	if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
 		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
 	/* Enable Inner and Outer checksum for Tunnel TSO */
-	if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO))
+	if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		    RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
 		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
 			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_TX_OFFLOAD_TSTAMP_F;
 
-	if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY)
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
 		flags |= NIX_TX_OFFLOAD_SECURITY_F;
 
 	return flags;
@@ -298,9 +298,9 @@ cn9k_nix_configure(struct rte_eth_dev *eth_dev)
 
 	/* Platform specific checks */
 	if ((roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) &&
-	    (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
-	    ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
-	     (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
+	    (txmode->offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
+	    ((txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+	     (txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
 		plt_err("Outer IP and SCTP checksum unsupported");
 		return -EINVAL;
 	}
@@ -486,6 +486,7 @@ nix_eth_dev_ops_override(void)
 	cnxk_eth_dev_ops.dev_ptypes_set = cn9k_nix_ptypes_set;
 	cnxk_eth_dev_ops.timesync_enable = cn9k_nix_timesync_enable;
 	cnxk_eth_dev_ops.timesync_disable = cn9k_nix_timesync_disable;
+	cnxk_eth_dev_ops.mtr_ops_get = NULL;
 }
 
 static void
@@ -553,17 +554,17 @@ cn9k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	 * TSO not supported for earlier chip revisions
 	 */
 	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0())
-		dev->tx_offload_capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
-					  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-					  DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-					  DEV_TX_OFFLOAD_GRE_TNL_TSO);
+		dev->tx_offload_capa &= ~(RTE_ETH_TX_OFFLOAD_TCP_TSO |
+					  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+					  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+					  RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);
 
 	/* 50G and 100G to be supported for board version C0
 	 * and above of CN9K.
 	 */
 	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) {
-		dev->speed_capa &= ~(uint64_t)ETH_LINK_SPEED_50G;
-		dev->speed_capa &= ~(uint64_t)ETH_LINK_SPEED_100G;
+		dev->speed_capa &= ~(uint64_t)RTE_ETH_LINK_SPEED_50G;
+		dev->speed_capa &= ~(uint64_t)RTE_ETH_LINK_SPEED_100G;
 	}
 
 	dev->hwcap = 0;
@@ -578,6 +579,21 @@ cn9k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 }
 
 static const struct rte_pci_id cn9k_pci_nix_map[] = {
+	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KA, PCI_DEVID_CNXK_RVU_PF),
+	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KB, PCI_DEVID_CNXK_RVU_PF),
+	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KC, PCI_DEVID_CNXK_RVU_PF),
+	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KD, PCI_DEVID_CNXK_RVU_PF),
+	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KE, PCI_DEVID_CNXK_RVU_PF),
+	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KA, PCI_DEVID_CNXK_RVU_VF),
+	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KB, PCI_DEVID_CNXK_RVU_VF),
+	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KC, PCI_DEVID_CNXK_RVU_VF),
+	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KD, PCI_DEVID_CNXK_RVU_VF),
+	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KE, PCI_DEVID_CNXK_RVU_VF),
+	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KA, PCI_DEVID_CNXK_RVU_AF_VF),
+	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KB, PCI_DEVID_CNXK_RVU_AF_VF),
+	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KC, PCI_DEVID_CNXK_RVU_AF_VF),
+	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KD, PCI_DEVID_CNXK_RVU_AF_VF),
+	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KE, PCI_DEVID_CNXK_RVU_AF_VF),
 	{
 		.vendor_id = 0,
 	},