diff --git a/drivers/net/netvsc/hn_var.h b/drivers/net/netvsc/hn_var.h
index bf94d90a76..881832d857 100644
--- a/drivers/net/netvsc/hn_var.h
+++ b/drivers/net/netvsc/hn_var.h
@@ -40,6 +40,7 @@ struct hn_stats {
 	uint64_t	bytes;
 	uint64_t	errors;
 	uint64_t	ring_full;
+	uint64_t	channel_full;
 	uint64_t	multicast;
 	uint64_t	broadcast;
 	/* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */
@@ -52,6 +53,8 @@ struct hn_tx_queue {
 	uint16_t	port_id;
 	uint16_t	queue_id;
 	uint32_t	free_thresh;
+	struct rte_mempool *txdesc_pool;
+	void		*tx_rndis;
 
 	/* Applied packet transmission aggregation limits. */
 	uint32_t	agg_szmax;
@@ -96,7 +99,7 @@ struct hn_rx_bufinfo {
 struct hn_data {
 	struct rte_vmbus_device *vmbus;
 	struct hn_rx_queue *primary;
-	rte_spinlock_t	vf_lock;
+	rte_rwlock_t	vf_lock;
 	uint16_t	port_id;
 	uint16_t	vf_port;
 
@@ -115,8 +118,10 @@ struct hn_data {
 	uint16_t	num_queues;
 	uint64_t	rss_offloads;
 
+	rte_spinlock_t	chim_lock;
 	struct rte_mem_resource *chim_res;	/* UIO resource for Tx */
-	struct rte_mempool *tx_pool;		/* Tx descriptors */
+	struct rte_bitmap *chim_bmap;		/* Send buffer map */
+	void		*chim_bmem;
 	uint32_t	chim_szmax;		/* Max size per buffer */
 	uint32_t	chim_cnt;		/* Max packets per buffer */
 
@@ -131,10 +136,11 @@ struct hn_data {
 	rte_atomic32_t	rndis_req_id;
 	uint8_t		rndis_resp[256];
 
-	struct rte_ether_addr mac_addr;
+	uint32_t	rss_hash;
+	uint8_t		rss_key[40];
+	uint16_t	rss_ind[128];
 
 	struct rte_eth_dev_owner owner;
-	struct rte_intr_handle vf_intr;
 
 	struct vmbus_channel *channels[HN_MAX_CHANNELS];
 };
@@ -153,8 +159,8 @@ uint16_t hn_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 uint16_t hn_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		      uint16_t nb_pkts);
 
-int	hn_tx_pool_init(struct rte_eth_dev *dev);
-void	hn_tx_pool_uninit(struct rte_eth_dev *dev);
+int	hn_chim_init(struct rte_eth_dev *dev);
+void	hn_chim_uninit(struct rte_eth_dev *dev);
 int	hn_dev_link_update(struct rte_eth_dev *dev, int wait);
 int	hn_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 			      uint16_t nb_desc, unsigned int socket_id,
@@ -163,6 +169,7 @@ void	hn_dev_tx_queue_release(void *arg);
 void	hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
 			     struct rte_eth_txq_info *qinfo);
 int	hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt);
+int	hn_dev_tx_descriptor_status(void *arg, uint16_t offset);
 
 struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
 				      uint16_t queue_id,
@@ -172,7 +179,11 @@ int	hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			      unsigned int socket_id,
 			      const struct rte_eth_rxconf *rx_conf,
 			      struct rte_mempool *mp);
+void	hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_id,
+			     struct rte_eth_rxq_info *qinfo);
 void	hn_dev_rx_queue_release(void *arg);
+uint32_t hn_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id);
+int	hn_dev_rx_queue_status(void *rxq, uint16_t offset);
 void	hn_dev_free_queues(struct rte_eth_dev *dev);
 
 /* Check if VF is attached */
@@ -182,22 +193,22 @@ hn_vf_attached(const struct hn_data *hv)
 	return hv->vf_port != HN_INVALID_PORT;
 }
 
-/* Get VF device for existing netvsc device */
+/*
+ * Get VF device for existing netvsc device
+ * Assumes vf_lock is held.
+ */
 static inline struct rte_eth_dev *
 hn_get_vf_dev(const struct hn_data *hv)
 {
 	uint16_t vf_port = hv->vf_port;
 
-	/* make sure vf_port is loaded */
-	rte_smp_rmb();
-
 	if (vf_port == HN_INVALID_PORT)
 		return NULL;
 	else
 		return &rte_eth_devices[vf_port];
 }
 
-void	hn_vf_info_get(struct hn_data *hv,
+int	hn_vf_info_get(struct hn_data *hv,
 		   struct rte_eth_dev_info *info);
 int	hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv);
 int	hn_vf_configure(struct rte_eth_dev *dev,
@@ -208,21 +219,21 @@ void	hn_vf_reset(struct rte_eth_dev *dev);
 void	hn_vf_stop(struct rte_eth_dev *dev);
 void	hn_vf_close(struct rte_eth_dev *dev);
 
-void	hn_vf_allmulticast_enable(struct rte_eth_dev *dev);
-void	hn_vf_allmulticast_disable(struct rte_eth_dev *dev);
-void	hn_vf_promiscuous_enable(struct rte_eth_dev *dev);
-void	hn_vf_promiscuous_disable(struct rte_eth_dev *dev);
+int	hn_vf_allmulticast_enable(struct rte_eth_dev *dev);
+int	hn_vf_allmulticast_disable(struct rte_eth_dev *dev);
+int	hn_vf_promiscuous_enable(struct rte_eth_dev *dev);
+int	hn_vf_promiscuous_disable(struct rte_eth_dev *dev);
 int	hn_vf_mc_addr_list(struct rte_eth_dev *dev,
 			   struct rte_ether_addr *mc_addr_set,
 			   uint32_t nb_mc_addr);
 
-int	hn_vf_link_update(struct rte_eth_dev *dev,
-			  int wait_to_complete);
 int	hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
 			     uint16_t queue_idx, uint16_t nb_desc,
 			     unsigned int socket_id,
 			     const struct rte_eth_txconf *tx_conf);
 void	hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id);
+int	hn_vf_tx_queue_status(struct hn_data *hv, uint16_t queue_id, uint16_t offset);
+
 int	hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
 			     uint16_t queue_idx, uint16_t nb_desc,
 			     unsigned int socket_id,
@@ -231,11 +242,16 @@ int	hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
 void	hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id);
 
 int	hn_vf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
-void	hn_vf_stats_reset(struct rte_eth_dev *dev);
+int	hn_vf_stats_reset(struct rte_eth_dev *dev);
 
 int	hn_vf_xstats_get_names(struct rte_eth_dev *dev,
 			       struct rte_eth_xstat_name *xstats_names,
 			       unsigned int size);
 int	hn_vf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
-			 unsigned int n);
-void	hn_vf_xstats_reset(struct rte_eth_dev *dev);
+			 unsigned int offset, unsigned int n);
+int	hn_vf_xstats_reset(struct rte_eth_dev *dev);
+int	hn_vf_rss_hash_update(struct rte_eth_dev *dev,
+			      struct rte_eth_rss_conf *rss_conf);
+int	hn_vf_reta_hash_update(struct rte_eth_dev *dev,
+			      struct rte_eth_rss_reta_entry64 *reta_conf,
+			      uint16_t reta_size);
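
Two of the changes above are worth illustrating for anyone rebasing on this header. First, vf_lock becomes an rte_rwlock_t and hn_get_vf_dev() now states that the lock must be held by the caller. The sketch below shows the expected caller pattern under that assumption; the function name hn_vf_caller_example and the forwarding step are illustrative only, not part of this diff.

/* Minimal sketch, assuming <rte_rwlock.h> and hn_var.h are included. */
static void
hn_vf_caller_example(struct hn_data *hv)
{
	struct rte_eth_dev *vf_dev;

	/* Readers take the read lock around the lookup and the use of
	 * the VF device; code that changes hv->vf_port (VF add/remove)
	 * would take the write lock instead.
	 */
	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev != NULL) {
		/* forward the operation to the VF port here */
	}
	rte_rwlock_read_unlock(&hv->vf_lock);
}

Second, the per-device Tx mempool (tx_pool) is replaced by a chimney send-buffer bitmap (chim_lock, chim_bmap, chim_bmem), and the init/uninit helpers are renamed to hn_chim_init()/hn_chim_uninit(). A rough sketch of how such a bitmap could be set up from the new fields follows; the allocation flow and error handling here are assumptions, and the real hn_chim_init() may differ.

/* Hypothetical sketch, assuming <rte_bitmap.h>, <rte_malloc.h>,
 * <rte_spinlock.h> and <errno.h> are included.
 */
static int
hn_chim_init_sketch(struct hn_data *hv)
{
	uint32_t bmap_size = rte_bitmap_get_memory_footprint(hv->chim_cnt);
	uint32_t i;

	rte_spinlock_init(&hv->chim_lock);

	hv->chim_bmem = rte_zmalloc("hn_chim_bitmap", bmap_size,
				    RTE_CACHE_LINE_SIZE);
	if (hv->chim_bmem == NULL)
		return -ENOMEM;

	hv->chim_bmap = rte_bitmap_init(hv->chim_cnt,
					hv->chim_bmem, bmap_size);
	if (hv->chim_bmap == NULL) {
		rte_free(hv->chim_bmem);
		hv->chim_bmem = NULL;
		return -EINVAL;
	}

	/* mark every chimney send-buffer slot as free */
	for (i = 0; i < hv->chim_cnt; i++)
		rte_bitmap_set(hv->chim_bmap, i);

	return 0;
}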