1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2009-2018 Microsoft Corp.
3 * Copyright (c) 2016 Brocade Communications Systems, Inc.
4 * Copyright (c) 2012 NetApp Inc.
5 * Copyright (c) 2012 Citrix Inc.
10 * Tunable ethdev params
12 #define HN_MIN_RX_BUF_SIZE 1024
13 #define HN_MAX_XFER_LEN 2048
14 #define HN_MAX_MAC_ADDRS 1
15 #define HN_MAX_CHANNELS 64
17 /* Claimed to be 12232B */
18 #define HN_MTU_MAX (9 * 1024)
21 #define HN_CHAN_INTERVAL_US 100
23 /* Host monitor interval */
24 #define HN_CHAN_LATENCY_NS 50000
26 /* Buffers need to be aligned */
28 #define PAGE_SIZE 4096
32 #define PAGE_MASK (PAGE_SIZE - 1)
43 uint64_t channel_full;
46 /* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */
47 uint64_t size_bins[8];
52 struct vmbus_channel *chan;
56 struct rte_mempool *txdesc_pool;
57 const struct rte_memzone *tx_rndis_mz;
59 rte_iova_t tx_rndis_iova;
61 /* Applied packet transmission aggregation limits. */
66 /* Packet transmission aggregation states */
67 struct hn_txdesc *agg_txd;
70 struct rndis_packet_msg *agg_prevpkt;
72 struct hn_stats stats;
77 struct vmbus_channel *chan;
78 struct rte_mempool *mb_pool;
79 struct rte_ring *rx_ring;
81 rte_spinlock_t ring_lock;
85 struct hn_stats stats;
88 struct hn_rx_bufinfo *rxbuf_info;
89 rte_atomic32_t rxbuf_outstanding;
93 /* multi-packet data from host */
94 struct hn_rx_bufinfo {
95 struct vmbus_channel *chan;
96 struct hn_rx_queue *rxq;
98 struct rte_mbuf_ext_shared_info shinfo;
99 } __rte_cache_aligned;
101 #define HN_INVALID_PORT UINT16_MAX
104 struct rte_vmbus_device *vmbus;
105 struct hn_rx_queue *primary;
106 rte_rwlock_t vf_lock;
114 uint32_t link_status;
117 struct rte_mem_resource *rxbuf_res; /* UIO resource for Rx */
118 uint32_t rxbuf_section_cnt; /* # of Rx sections */
119 uint16_t max_queues; /* Max available queues */
121 uint64_t rss_offloads;
123 rte_spinlock_t chim_lock;
124 struct rte_mem_resource *chim_res; /* UIO resource for Tx */
125 struct rte_bitmap *chim_bmap; /* Send buffer map */
127 uint32_t chim_szmax; /* Max size per buffer */
128 uint32_t chim_cnt; /* Max packets per buffer */
133 uint32_t rndis_agg_size;
134 uint32_t rndis_agg_pkts;
135 uint32_t rndis_agg_align;
137 volatile uint32_t rndis_pending;
138 rte_atomic32_t rndis_req_id;
139 uint8_t rndis_resp[256];
143 uint16_t rss_ind[128];
145 struct rte_eth_dev_owner owner;
147 struct vmbus_channel *channels[HN_MAX_CHANNELS];
/* Return the primary vmbus channel (index 0) for this device. */
150 static inline struct vmbus_channel *
151 hn_primary_chan(const struct hn_data *hv)
153 	return hv->channels[0];
156 uint32_t hn_process_events(struct hn_data *hv, uint16_t queue_id,
159 uint16_t hn_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
161 uint16_t hn_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
/* Chimney (host send-buffer) management and Tx queue ethdev callbacks. */
164 int hn_chim_init(struct rte_eth_dev *dev);
165 void hn_chim_uninit(struct rte_eth_dev *dev);
166 int hn_dev_link_update(struct rte_eth_dev *dev, int wait);
167 int hn_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
168 			  uint16_t nb_desc, unsigned int socket_id,
169 			  const struct rte_eth_txconf *tx_conf);
170 void hn_dev_tx_queue_release(void *arg);
171 void hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
172 			  struct rte_eth_txq_info *qinfo);
173 int hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt);
174 int hn_dev_tx_descriptor_status(void *arg, uint16_t offset);
176 struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
178 unsigned int socket_id);
/* Rx queue ethdev callbacks: setup, info, release, count/status, teardown. */
179 int hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
180 			  uint16_t queue_idx, uint16_t nb_desc,
181 			  unsigned int socket_id,
182 			  const struct rte_eth_rxconf *rx_conf,
183 			  struct rte_mempool *mp);
184 void hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_id,
185 			  struct rte_eth_rxq_info *qinfo);
186 void hn_dev_rx_queue_release(void *arg);
187 uint32_t hn_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id);
188 int hn_dev_rx_queue_status(void *rxq, uint16_t offset);
189 void hn_dev_free_queues(struct rte_eth_dev *dev);
/* Check if VF is attached (i.e. vf_port holds a valid ethdev port id). */
193 hn_vf_attached(const struct hn_data *hv)
195 	return hv->vf_port != HN_INVALID_PORT;
199 * Get VF device for existing netvsc device
200 * Assumes vf_lock is held.
/*
 * Look up the rte_eth_dev for the accelerated-networking VF paired with
 * this synthetic device. Caller must hold vf_lock (per comment above).
 * NOTE(review): presumably returns NULL when no VF is attached — the
 * branch body for the HN_INVALID_PORT check is outside this view; confirm.
 */
202 static inline struct rte_eth_dev *
203 hn_get_vf_dev(const struct hn_data *hv)
205 	uint16_t vf_port = hv->vf_port;
207 	if (vf_port == HN_INVALID_PORT)
210 	return &rte_eth_devices[vf_port];
/*
 * VF passthrough wrappers: forward ethdev lifecycle and filter-mode
 * operations to the paired VF device when one is attached.
 */
213 int hn_vf_info_get(struct hn_data *hv,
214 		   struct rte_eth_dev_info *info);
215 int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv);
216 int hn_vf_configure(struct rte_eth_dev *dev,
217 		    const struct rte_eth_conf *dev_conf);
218 const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev);
219 int hn_vf_start(struct rte_eth_dev *dev);
220 void hn_vf_reset(struct rte_eth_dev *dev);
221 int hn_vf_close(struct rte_eth_dev *dev);
222 int hn_vf_stop(struct rte_eth_dev *dev);
/* Promiscuous/allmulticast/multicast-list passthrough to the VF. */
224 int hn_vf_allmulticast_enable(struct rte_eth_dev *dev);
225 int hn_vf_allmulticast_disable(struct rte_eth_dev *dev);
226 int hn_vf_promiscuous_enable(struct rte_eth_dev *dev);
227 int hn_vf_promiscuous_disable(struct rte_eth_dev *dev);
228 int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
229 		       struct rte_ether_addr *mc_addr_set,
230 		       uint32_t nb_mc_addr);
/* VF queue setup/release passthrough (Tx then Rx), plus basic stats. */
232 int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
233 			 uint16_t queue_idx, uint16_t nb_desc,
234 			 unsigned int socket_id,
235 			 const struct rte_eth_txconf *tx_conf);
236 void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id);
237 int hn_vf_tx_queue_status(struct hn_data *hv, uint16_t queue_id, uint16_t offset);
239 int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
240 			 uint16_t queue_idx, uint16_t nb_desc,
241 			 unsigned int socket_id,
242 			 const struct rte_eth_rxconf *rx_conf,
243 			 struct rte_mempool *mp);
244 void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id);
/* Basic stats get/reset forwarded to the VF device. */
246 int hn_vf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
247 int hn_vf_stats_reset(struct rte_eth_dev *dev);
248 int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
249 struct rte_eth_xstat_name *xstats_names,
/* Extended stats and RSS hash configuration passthrough to the VF. */
251 int hn_vf_xstats_get(struct rte_eth_dev *dev,
252 		     struct rte_eth_xstat *xstats,
253 		     unsigned int offset, unsigned int n);
254 int hn_vf_xstats_reset(struct rte_eth_dev *dev);
255 int hn_vf_rss_hash_update(struct rte_eth_dev *dev,
256 			  struct rte_eth_rss_conf *rss_conf);
257 int hn_vf_reta_hash_update(struct rte_eth_dev *dev,
258 struct rte_eth_rss_reta_entry64 *reta_conf,