X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fcnxk%2Fcnxk_ethdev.h;h=a5380a5e0c231f991beffab3f0b6e31f5ca082e5;hb=5fe86db2a0dda614f9578331a28411062b43ba11;hp=5a52489ea3ae02b77281cb320c1a629a49b59487;hpb=06d7544052db16666bbfe29669ccf92b3e0610e8;p=dpdk.git

diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 5a52489ea3..a5380a5e0c 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -46,6 +46,10 @@
 #define CNXK_NIX_TX_NB_SEG_MAX 9
 #endif
 
+#define CNXK_NIX_TX_MSEG_SG_DWORDS                                            \
+	((RTE_ALIGN_MUL_CEIL(CNXK_NIX_TX_NB_SEG_MAX, 3) / 3) +                \
+	 CNXK_NIX_TX_NB_SEG_MAX)
+
 #define CNXK_NIX_RSS_L3_L4_SRC_DST                                            \
 	(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY |    \
 	 ETH_RSS_L4_DST_ONLY)
@@ -91,6 +95,9 @@
 #define RSS_SCTP_INDEX 4
 #define RSS_DMAC_INDEX 5
 
+/* Default mark value used when none is provided. */
+#define CNXK_FLOW_ACTION_FLAG_DEFAULT 0xffff
+
 #define PTYPE_NON_TUNNEL_WIDTH 16
 #define PTYPE_TUNNEL_WIDTH 12
 #define PTYPE_NON_TUNNEL_ARRAY_SZ BIT(PTYPE_NON_TUNNEL_WIDTH)
@@ -100,6 +107,10 @@
 /* Fastpath lookup */
 #define CNXK_NIX_FASTPATH_LOOKUP_MEM "cnxk_nix_fastpath_lookup_mem"
 
+#define CNXK_NIX_UDP_TUN_BITMASK                                              \
+	((1ull << (PKT_TX_TUNNEL_VXLAN >> 45)) |                              \
+	 (1ull << (PKT_TX_TUNNEL_GENEVE >> 45)))
+
 struct cnxk_eth_qconf {
 	union {
 		struct rte_eth_txconf tx;
@@ -160,6 +171,9 @@ struct cnxk_eth_dev {
 	struct cnxk_eth_qconf *tx_qconf;
 	struct cnxk_eth_qconf *rx_qconf;
 
+	/* Rx burst for cleanup(Only Primary) */
+	eth_rx_burst_t rx_pkt_burst_no_offload;
+
 	/* Default mac address */
 	uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
 
@@ -204,6 +218,8 @@ extern struct eth_dev_ops cnxk_eth_dev_ops;
 int cnxk_nix_probe(struct rte_pci_driver *pci_drv,
 		   struct rte_pci_device *pci_dev);
 int cnxk_nix_remove(struct rte_pci_device *pci_dev);
+int cnxk_nix_mac_addr_set(struct rte_eth_dev *eth_dev,
+			  struct rte_ether_addr *addr);
 int cnxk_nix_info_get(struct rte_eth_dev *eth_dev,
 		      struct rte_eth_dev_info *dev_info);
 int cnxk_nix_configure(struct rte_eth_dev *eth_dev);
@@ -215,6 +231,7 @@ int cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 			    const struct rte_eth_rxconf *rx_conf,
 			    struct rte_mempool *mp);
 int cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid);
+int cnxk_nix_dev_start(struct rte_eth_dev *eth_dev);
 
 uint64_t cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev);
 
@@ -223,6 +240,7 @@ uint32_t cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
 				uint8_t rss_level);
 
 /* Link */
+void cnxk_nix_toggle_flag_link_cfg(struct cnxk_eth_dev *dev, bool set);
 void cnxk_eth_dev_link_status_cb(struct roc_nix *nix,
 				 struct roc_nix_link_info *link);
 int cnxk_nix_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete);
@@ -235,4 +253,71 @@ void *cnxk_nix_fastpath_lookup_mem_get(void);
 int cnxk_ethdev_parse_devargs(struct rte_devargs *devargs,
 			      struct cnxk_eth_dev *dev);
 
+/* Inlines */
+static __rte_always_inline uint64_t
+cnxk_pktmbuf_detach(struct rte_mbuf *m)
+{
+	struct rte_mempool *mp = m->pool;
+	uint32_t mbuf_size, buf_len;
+	struct rte_mbuf *md;
+	uint16_t priv_size;
+	uint16_t refcount;
+
+	/* Update refcount of direct mbuf */
+	md = rte_mbuf_from_indirect(m);
+	refcount = rte_mbuf_refcnt_update(md, -1);
+
+	priv_size = rte_pktmbuf_priv_size(mp);
+	mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
+	buf_len = rte_pktmbuf_data_room_size(mp);
+
+	m->priv_size = priv_size;
+	m->buf_addr = (char *)m + mbuf_size;
+	m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
+	m->buf_len = (uint16_t)buf_len;
+	rte_pktmbuf_reset_headroom(m);
+	m->data_len = 0;
+	m->ol_flags = 0;
+	m->next = NULL;
+	m->nb_segs = 1;
+
+	/* Now indirect mbuf is safe to free */
+	rte_pktmbuf_free(m);
+
+	if (refcount == 0) {
+		rte_mbuf_refcnt_set(md, 1);
+		md->data_len = 0;
+		md->ol_flags = 0;
+		md->next = NULL;
+		md->nb_segs = 1;
+		return 0;
+	} else {
+		return 1;
+	}
+}
+
+static __rte_always_inline uint64_t
+cnxk_nix_prefree_seg(struct rte_mbuf *m)
+{
+	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
+		if (!RTE_MBUF_DIRECT(m))
+			return cnxk_pktmbuf_detach(m);
+
+		m->next = NULL;
+		m->nb_segs = 1;
+		return 0;
+	} else if (rte_mbuf_refcnt_update(m, -1) == 0) {
+		if (!RTE_MBUF_DIRECT(m))
+			return cnxk_pktmbuf_detach(m);
+
+		rte_mbuf_refcnt_set(m, 1);
+		m->next = NULL;
+		m->nb_segs = 1;
+		return 0;
+	}
+
+	/* Mbuf is having refcount more than 1 so need not to be freed */
+	return 1;
+}
+
 #endif /* __CNXK_ETHDEV_H__ */
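
Illustrative usage sketch (not part of the patch above): the new CNXK_NIX_UDP_TUN_BITMASK macro and the cnxk_nix_prefree_seg() inline are intended for the Tx fast path. The fragment below shows one plausible way they could be consumed; example_prepare_tx_seg() and its output parameters are hypothetical names, and mapping the helper's non-zero return value to a "don't free" decision follows the return convention stated in the comments of the code above.

/* Sketch only -- not part of drivers/net/cnxk.  Assumes the pre-21.11 DPDK
 * mbuf flag names (PKT_TX_TUNNEL_MASK etc.) used elsewhere in this series.
 */
#include <stdbool.h>
#include <stdint.h>

#include <rte_mbuf.h>

#include "cnxk_ethdev.h"

static inline void
example_prepare_tx_seg(struct rte_mbuf *m, bool *udp_tun, bool *dont_free)
{
	uint64_t ol_flags = m->ol_flags;

	/* The tunnel type is encoded in bits [48:45] of ol_flags; the bitmask
	 * has one bit set per UDP-based tunnel type (VXLAN, GENEVE).
	 */
	*udp_tun = (CNXK_NIX_UDP_TUN_BITMASK >>
		    ((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) & 0x1;

	/* Non-zero return: other references to the buffer remain, so the
	 * hardware must not return it to its pool after transmit.
	 */
	*dont_free = (cnxk_nix_prefree_seg(m) != 0);
}

The tunnel-type test works because the PKT_TX_TUNNEL_* values occupy bits 45-48 of ol_flags, which is also why the macro shifts PKT_TX_TUNNEL_VXLAN and PKT_TX_TUNNEL_GENEVE right by 45 before turning them into bit positions.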