diff --git a/drivers/net/netvsc/hn_var.h b/drivers/net/netvsc/hn_var.h
index b3e0a93d45..43642408bc 100644
--- a/drivers/net/netvsc/hn_var.h
+++ b/drivers/net/netvsc/hn_var.h
@@ -6,6 +6,8 @@
  * All rights reserved.
  */
 
+#include <rte_eal_paging.h>
+
 /*
  * Tunable ethdev params
  */
@@ -23,13 +25,13 @@
 /* Host monitor interval */
 #define HN_CHAN_LATENCY_NS 50000
 
-/* Buffers need to be aligned */
-#ifndef PAGE_SIZE
-#define PAGE_SIZE 4096
-#endif
+#define HN_TXCOPY_THRESHOLD 512
+#define HN_RXCOPY_THRESHOLD 256
+
+#define HN_RX_EXTMBUF_ENABLE 0
 
 #ifndef PAGE_MASK
-#define PAGE_MASK (PAGE_SIZE - 1)
+#define PAGE_MASK (rte_mem_page_size() - 1)
 #endif
 
 struct hn_data;
@@ -39,7 +41,8 @@ struct hn_stats {
 uint64_t packets;
 uint64_t bytes;
 uint64_t errors;
- uint64_t nomemory;
+ uint64_t ring_full;
+ uint64_t channel_full;
 uint64_t multicast;
 uint64_t broadcast;
 /* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */
@@ -52,6 +55,10 @@ struct hn_tx_queue {
 uint16_t port_id;
 uint16_t queue_id;
 uint32_t free_thresh;
+ struct rte_mempool *txdesc_pool;
+ const struct rte_memzone *tx_rndis_mz;
+ void *tx_rndis;
+ rte_iova_t tx_rndis_iova;
 
 /* Applied packet transmission aggregation limits. */
 uint32_t agg_szmax;
@@ -78,41 +85,77 @@ struct hn_rx_queue {
 uint16_t port_id;
 uint16_t queue_id;
 struct hn_stats stats;
- uint64_t ring_full;
- uint8_t event_buf[];
+ void *event_buf;
+ struct hn_rx_bufinfo *rxbuf_info;
+ rte_atomic32_t rxbuf_outstanding;
 };
 
 /* multi-packet data from host */
 struct hn_rx_bufinfo {
 struct vmbus_channel *chan;
- struct hn_data *hv;
+ struct hn_rx_queue *rxq;
 uint64_t xactid;
 struct rte_mbuf_ext_shared_info shinfo;
 } __rte_cache_aligned;
 
+#define HN_INVALID_PORT UINT16_MAX
+
+enum vf_device_state {
+ vf_unknown = 0,
+ vf_removed,
+ vf_configured,
+ vf_started,
+ vf_stopped,
+};
+
+struct hn_vf_ctx {
+ uint16_t vf_port;
+
+ /* We have taken ownership of this VF port from DPDK */
+ bool vf_attached;
+
+ /* VSC has requested to switch data path to VF */
+ bool vf_vsc_switched;
+
+ /* VSP has reported the VF is present for this NIC */
+ bool vf_vsp_reported;
+
+ enum vf_device_state vf_state;
+};
+
 struct hn_data {
 struct rte_vmbus_device *vmbus;
 struct hn_rx_queue *primary;
+ rte_rwlock_t vf_lock;
 uint16_t port_id;
- bool closed;
+
+ struct hn_vf_ctx vf_ctx;
+
+ uint8_t closed;
+ uint8_t vlan_strip;
+
 uint32_t link_status;
 uint32_t link_speed;
 
 struct rte_mem_resource *rxbuf_res; /* UIO resource for Rx */
- struct hn_rx_bufinfo *rxbuf_info;
 uint32_t rxbuf_section_cnt; /* # of Rx sections */
- volatile uint32_t rxbuf_outstanding;
+ uint32_t rx_copybreak;
+ uint32_t rx_extmbuf_enable;
 uint16_t max_queues; /* Max available queues */
 uint16_t num_queues;
 uint64_t rss_offloads;
 
+ rte_spinlock_t chim_lock;
 struct rte_mem_resource *chim_res; /* UIO resource for Tx */
- struct rte_mempool *tx_pool; /* Tx descriptors */
+ struct rte_bitmap *chim_bmap; /* Send buffer map */
+ void *chim_bmem;
+ uint32_t tx_copybreak;
 uint32_t chim_szmax; /* Max size per buffer */
 uint32_t chim_cnt; /* Max packets per buffer */
 
+ uint32_t latency;
 uint32_t nvs_ver;
 uint32_t ndis_ver;
 uint32_t rndis_agg_size;
@@ -123,8 +166,16 @@ struct hn_data {
 rte_atomic32_t rndis_req_id;
 uint8_t rndis_resp[256];
 
- struct ether_addr mac_addr;
+ uint32_t rss_hash;
+ uint8_t rss_key[40];
+ uint16_t rss_ind[128];
+
+ struct rte_eth_dev_owner owner;
+
+ struct vmbus_channel *channels[HN_MAX_CHANNELS];
+
+ struct rte_devargs devargs;
+ int eal_hot_plug_retry;
 };
 
 static inline struct vmbus_channel *
@@ -133,20 +184,25 @@ hn_primary_chan(const struct hn_data *hv)
 return hv->channels[0];
 }
 
-void hn_process_events(struct hn_data *hv, uint16_t queue_id);
+uint32_t hn_process_events(struct hn_data *hv, uint16_t queue_id,
+ uint32_t tx_limit);
 
 uint16_t hn_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 uint16_t nb_pkts);
 uint16_t hn_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 uint16_t nb_pkts);
 
-int hn_tx_pool_init(struct rte_eth_dev *dev);
+int hn_chim_init(struct rte_eth_dev *dev);
+void hn_chim_uninit(struct rte_eth_dev *dev);
+int hn_dev_link_update(struct rte_eth_dev *dev, int wait);
 int hn_dev_tx_queue_setup(struct rte_eth_dev *dev,
 uint16_t queue_idx, uint16_t nb_desc,
 unsigned int socket_id,
 const struct rte_eth_txconf *tx_conf);
 void hn_dev_tx_queue_release(void *arg);
 void hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
 struct rte_eth_txq_info *qinfo);
+int hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt);
+int hn_dev_tx_descriptor_status(void *arg, uint16_t offset);
 
 struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
 uint16_t queue_id,
@@ -156,6 +212,73 @@ int hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
 unsigned int socket_id,
 const struct rte_eth_rxconf *rx_conf,
 struct rte_mempool *mp);
-void hn_dev_rx_queue_release(void *arg);
-void hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
+void hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_id,
 struct rte_eth_rxq_info *qinfo);
+void hn_dev_rx_queue_release(void *arg);
+uint32_t hn_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id);
+int hn_dev_rx_queue_status(void *rxq, uint16_t offset);
+void hn_dev_free_queues(struct rte_eth_dev *dev);
+
+/*
+ * Get VF device for existing netvsc device
+ * Assumes vf_lock is held.
+ */ +static inline struct rte_eth_dev * +hn_get_vf_dev(const struct hn_data *hv) +{ + if (hv->vf_ctx.vf_attached) + return &rte_eth_devices[hv->vf_ctx.vf_port]; + else + return NULL; +} + +int hn_vf_info_get(struct hn_data *hv, + struct rte_eth_dev_info *info); +int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv); +int hn_vf_configure_locked(struct rte_eth_dev *dev, + const struct rte_eth_conf *dev_conf); +const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev); +int hn_vf_start(struct rte_eth_dev *dev); +void hn_vf_reset(struct rte_eth_dev *dev); +int hn_vf_close(struct rte_eth_dev *dev); +int hn_vf_stop(struct rte_eth_dev *dev); + +int hn_vf_allmulticast_enable(struct rte_eth_dev *dev); +int hn_vf_allmulticast_disable(struct rte_eth_dev *dev); +int hn_vf_promiscuous_enable(struct rte_eth_dev *dev); +int hn_vf_promiscuous_disable(struct rte_eth_dev *dev); +int hn_vf_mc_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr); + +int hn_vf_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); +void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id); +int hn_vf_tx_queue_status(struct hn_data *hv, uint16_t queue_id, uint16_t offset); + +int hn_vf_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp); +void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id); + +int hn_vf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); +int hn_vf_stats_reset(struct rte_eth_dev *dev); +int hn_vf_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + unsigned int size); +int hn_vf_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstat *xstats, + unsigned int offset, unsigned int n); +int hn_vf_xstats_reset(struct rte_eth_dev *dev); +int hn_vf_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); +int hn_vf_reta_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +int hn_eth_rmv_event_callback(uint16_t port_id, + enum rte_eth_event_type event __rte_unused, + void *cb_arg, void *out __rte_unused);
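
A note on the new hn_get_vf_dev() helper: it returns a bare pointer into rte_eth_devices[], so the result is only valid while vf_lock is held, which is what the comment above it insists on. Below is a minimal sketch of the expected calling pattern. The wrapper name hn_vf_link_update_sketch is hypothetical and the rte_ethdev_driver.h include is an assumption about the internal ethdev API of this era, but the lock/dereference/unlock shape matches the hn_vf_* wrappers this header declares.

/* Illustrative sketch only -- not part of this patch. */
#include <rte_rwlock.h>
#include <rte_ethdev_driver.h>

static int
hn_vf_link_update_sketch(struct hn_data *hv, int wait_to_complete)
{
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	/* hn_get_vf_dev() assumes the caller holds vf_lock */
	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->link_update)
		ret = (*vf_dev->dev_ops->link_update)(vf_dev, wait_to_complete);
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}

A read lock suffices for callers that only dereference the VF port; presumably any path that attaches or detaches the VF (mutating vf_ctx) must take the write side of vf_lock instead.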