1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2009-2018 Microsoft Corp.
3 * Copyright (c) 2016 Brocade Communications Systems, Inc.
4 * Copyright (c) 2012 NetApp Inc.
5 * Copyright (c) 2012 Citrix Inc.
9 #include <rte_eal_paging.h>
10 #include <ethdev_driver.h>
13 * Tunable ethdev params
/* Smallest Rx data buffer size (bytes) the PMD will accept from a mempool. */
15 #define HN_MIN_RX_BUF_SIZE 1024
/* NOTE(review): presumably the max length of a single data transfer/segment — confirm at use sites. */
16 #define HN_MAX_XFER_LEN 2048
/* The synthetic NIC exposes exactly one MAC address. */
17 #define HN_MAX_MAC_ADDRS 1
/* Upper bound on VMBus channels (queues) per device; sizes hn_data's channels[] table. */
18 #define HN_MAX_CHANNELS 64
20 /* Claimed to be 12232B */
/* Maximum MTU advertised by this driver (9 KiB), kept below the host's claimed limit above. */
21 #define HN_MTU_MAX (9 * 1024)
/* NOTE(review): presumably a polling/retry interval in microseconds for channel operations — verify callers. */
24 #define HN_CHAN_INTERVAL_US 100
26 /* Host monitor interval */
27 #define HN_CHAN_LATENCY_NS 50000
/* NOTE(review): presumably the default tx_copybreak (see hn_data) — Tx packets at or below this size are copied rather than mapped. */
29 #define HN_TXCOPY_THRESHOLD 512
/* NOTE(review): presumably the default rx_copybreak — small Rx packets are copied into a fresh mbuf. */
30 #define HN_RXCOPY_THRESHOLD 256
/* Default for external-mbuf Rx (see hn_data's rx_extmbuf_enable): disabled. */
32 #define HN_RX_EXTMBUF_ENABLE 0
/* Mask selecting the offset-within-page bits for the runtime page size. */
35 #define PAGE_MASK (rte_mem_page_size() - 1)
/* NOTE(review): presumably counts transmit attempts deferred because the VMBus ring was full — confirm in datapath. */
46 uint64_t channel_full;
49 /* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */
50 uint64_t size_bins[8];
/* Fragment of the per-queue Tx state (enclosing struct definition not fully visible here). */
/* VMBus channel this Tx queue transmits on. */
55 struct vmbus_channel *chan;
/* Mempool of hn_txdesc transmit descriptors for this queue. */
59 struct rte_mempool *txdesc_pool;
/* Memzone backing the per-descriptor RNDIS message area. */
60 const struct rte_memzone *tx_rndis_mz;
/* IOVA of the tx_rndis_mz region (DMA address handed to the host). */
62 rte_iova_t tx_rndis_iova;
64 /* Applied packet transmission aggregation limits. */
69 /* Packet transmission aggregation states */
/* Descriptor currently accumulating aggregated packets; NOTE(review): presumably NULL when no aggregation in progress. */
70 struct hn_txdesc *agg_txd;
/* Last RNDIS packet message appended to the current aggregate. */
73 struct rndis_packet_msg *agg_prevpkt;
/* Per-queue Tx counters (struct hn_stats above). */
75 struct hn_stats stats;
/* Fragment of the per-queue Rx state (enclosing struct definition not fully visible here). */
/* VMBus channel this Rx queue receives on. */
80 struct vmbus_channel *chan;
/* Mempool supplying receive mbufs. */
81 struct rte_mempool *mb_pool;
/* NOTE(review): presumably a staging ring of received mbufs awaiting hn_recv_pkts — confirm in datapath. */
82 struct rte_ring *rx_ring;
/* NOTE(review): presumably serializes producers/consumers of rx_ring — confirm lock scope at use sites. */
84 rte_spinlock_t ring_lock;
/* Per-queue Rx counters (struct hn_stats above). */
88 struct hn_stats stats;
/* Per-section bookkeeping for the host Rx buffer (struct hn_rx_bufinfo below). */
91 struct hn_rx_bufinfo *rxbuf_info;
/* NOTE(review): presumably the number of host Rx buffer sections still referenced by attached mbufs — confirm. */
92 rte_atomic32_t rxbuf_outstanding;
96 /* multi-packet data from host */
/* Bookkeeping for one host Rx buffer section attached to mbufs as external data.
 * NOTE(review): one member of the original definition (original line 100) is not
 * visible in this fragment.
 */
97 struct hn_rx_bufinfo {
/* Channel the buffer arrived on (needed to acknowledge/complete it back to the host). */
98 struct vmbus_channel *chan;
/* Owning Rx queue. */
99 struct hn_rx_queue *rxq;
/* Shared-info block used when attaching this buffer to mbufs as external storage. */
101 struct rte_mbuf_ext_shared_info shinfo;
102 } __rte_cache_aligned;
/* Sentinel meaning "no DPDK port associated". */
104 #define HN_INVALID_PORT UINT16_MAX
/* States of the accelerated-networking VF companion device
 * (enumerators not fully visible in this fragment). */
106 enum vf_device_state {
117 /* We have taken ownership of this VF port from DPDK */
120 /* VSC has requested to switch data path to VF */
121 bool vf_vsc_switched;
123 /* VSP has reported the VF is present for this NIC */
124 bool vf_vsp_reported;
/* Current lifecycle state of the VF device. */
126 enum vf_device_state vf_state;
/* Context tracking one in-progress VF hot-add (closing brace not visible in this fragment). */
129 struct hv_hotadd_context {
/* Linkage on hn_data's hotadd_list. */
130 LIST_ENTRY(hv_hotadd_context) list;
/* Device arguments of the device being hot-plugged. */
132 struct rte_devargs da;
/* NOTE(review): presumably counts EAL hotplug attempts remaining/made for retry logic — confirm. */
133 int eal_hot_plug_retry;
/* Fragment of the per-device private data (enclosing struct definition not fully visible here). */
/* Underlying VMBus device. */
137 struct rte_vmbus_device *vmbus;
/* Primary channel's Rx queue (queue 0). */
138 struct hn_rx_queue *primary;
/* Guards access to the VF companion port state below. */
139 rte_rwlock_t vf_lock;
/* VF companion device state (vf_attached/vf_port etc.; see hn_get_vf_dev). */
142 struct hn_vf_ctx vf_ctx;
/* NOTE(review): presumably cached link status for link_update — confirm semantics. */
147 uint32_t link_status;
150 struct rte_mem_resource *rxbuf_res; /* UIO resource for Rx */
151 uint32_t rxbuf_section_cnt; /* # of Rx sections */
/* Runtime-tunable Rx copy threshold (default HN_RXCOPY_THRESHOLD). */
152 uint32_t rx_copybreak;
/* Runtime switch for attaching host Rx buffers as external mbufs (default HN_RX_EXTMBUF_ENABLE). */
153 uint32_t rx_extmbuf_enable;
154 uint16_t max_queues; /* Max available queues */
/* RSS offload capabilities advertised to applications. */
156 uint64_t rss_offloads;
/* Guards allocation/free of chimney send-buffer slots below. */
158 rte_spinlock_t chim_lock;
159 struct rte_mem_resource *chim_res; /* UIO resource for Tx */
160 struct rte_bitmap *chim_bmap; /* Send buffer map */
/* Runtime-tunable Tx copy threshold (default HN_TXCOPY_THRESHOLD). */
162 uint32_t tx_copybreak;
163 uint32_t chim_szmax; /* Max size per buffer */
164 uint32_t chim_cnt; /* Max packets per buffer */
/* Host-negotiated RNDIS aggregation limits: max bytes, max packets, alignment. */
169 uint32_t rndis_agg_size;
170 uint32_t rndis_agg_pkts;
171 uint32_t rndis_agg_align;
/* NOTE(review): presumably the id of the outstanding RNDIS request being waited on — confirm. */
173 volatile uint32_t rndis_pending;
/* Monotonically increasing RNDIS request-id generator. */
174 rte_atomic32_t rndis_req_id;
/* Buffer holding the most recent RNDIS control response. */
175 uint8_t rndis_resp[256];
/* RSS indirection (redirection) table. */
179 uint16_t rss_ind[128];
/* Ethdev ownership record used when claiming the VF port. */
181 struct rte_eth_dev_owner owner;
/* All VMBus channels for this device; [0] is the primary (see hn_primary_chan). */
183 struct vmbus_channel *channels[HN_MAX_CHANNELS];
/* Guards hotadd_list below. */
185 rte_spinlock_t hotadd_lock;
/* Pending VF hot-add contexts (struct hv_hotadd_context). */
186 LIST_HEAD(hotadd_list, hv_hotadd_context) hotadd_list;
/* Return the device's primary VMBus channel (channels[0]).
 * (Function braces not visible in this fragment.) */
190 static inline struct vmbus_channel *
191 hn_primary_chan(const struct hn_data *hv)
193 return hv->channels[0];
/* Datapath entry points (some parameter lists truncated in this fragment). */
/* Drain completions/events for one queue; returns number of events processed. */
196 uint32_t hn_process_events(struct hn_data *hv, uint16_t queue_id,
/* Burst transmit / receive entry points (rte_eth_dev tx_pkt_burst / rx_pkt_burst). */
199 uint16_t hn_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
201 uint16_t hn_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
/* Chimney (host send-buffer) setup and teardown. */
204 int hn_chim_init(struct rte_eth_dev *dev);
205 void hn_chim_uninit(struct rte_eth_dev *dev);
206 int hn_dev_link_update(struct rte_eth_dev *dev, int wait);
/* Tx queue ethdev ops. */
207 int hn_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
208 uint16_t nb_desc, unsigned int socket_id,
209 const struct rte_eth_txconf *tx_conf);
210 void hn_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
211 void hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
212 struct rte_eth_txq_info *qinfo);
213 int hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt);
214 int hn_dev_tx_descriptor_status(void *arg, uint16_t offset);
/* Rx queue allocation and ethdev ops. */
216 struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
218 unsigned int socket_id);
219 int hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
220 uint16_t queue_idx, uint16_t nb_desc,
221 unsigned int socket_id,
222 const struct rte_eth_rxconf *rx_conf,
223 struct rte_mempool *mp);
224 void hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_id,
225 struct rte_eth_rxq_info *qinfo);
226 void hn_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
227 uint32_t hn_dev_rx_queue_count(void *rx_queue);
228 int hn_dev_rx_queue_status(void *rxq, uint16_t offset);
/* Free all Rx and Tx queues on device close. */
229 void hn_dev_free_queues(struct rte_eth_dev *dev);
232 * Get VF device for existing netvsc device
233 * Assumes vf_lock is held.
/* Return the attached VF's ethdev when vf_attached is set.
 * (Function braces and the not-attached branch are not visible in this fragment.) */
235 static inline struct rte_eth_dev *
236 hn_get_vf_dev(const struct hn_data *hv)
238 if (hv->vf_ctx.vf_attached)
239 return &rte_eth_devices[hv->vf_ctx.vf_port];
/* VF passthrough helpers: forward ethdev operations to the companion VF port
 * when one is attached (some parameter lists truncated in this fragment). */
244 int hn_vf_info_get(struct hn_data *hv,
245 struct rte_eth_dev_info *info);
/* VF lifecycle: attach, (re)configure, start/stop, reset, close. */
246 int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv);
247 int hn_vf_configure_locked(struct rte_eth_dev *dev,
248 const struct rte_eth_conf *dev_conf);
249 const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev);
250 int hn_vf_start(struct rte_eth_dev *dev);
251 void hn_vf_reset(struct rte_eth_dev *dev);
252 int hn_vf_close(struct rte_eth_dev *dev);
253 int hn_vf_stop(struct rte_eth_dev *dev);
/* Multicast/promiscuous filtering forwarded to the VF. */
255 int hn_vf_allmulticast_enable(struct rte_eth_dev *dev);
256 int hn_vf_allmulticast_disable(struct rte_eth_dev *dev);
257 int hn_vf_promiscuous_enable(struct rte_eth_dev *dev);
258 int hn_vf_promiscuous_disable(struct rte_eth_dev *dev);
259 int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
260 struct rte_ether_addr *mc_addr_set,
261 uint32_t nb_mc_addr);
/* Queue setup/teardown mirrored onto the VF. */
263 int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
264 uint16_t queue_idx, uint16_t nb_desc,
265 unsigned int socket_id,
266 const struct rte_eth_txconf *tx_conf);
267 void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id);
268 int hn_vf_tx_queue_status(struct hn_data *hv, uint16_t queue_id, uint16_t offset);
270 int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
271 uint16_t queue_idx, uint16_t nb_desc,
272 unsigned int socket_id,
273 const struct rte_eth_rxconf *rx_conf,
274 struct rte_mempool *mp);
275 void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id);
/* Statistics and xstats merged from/reset on the VF. */
277 int hn_vf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
278 int hn_vf_stats_reset(struct rte_eth_dev *dev);
279 int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
280 struct rte_eth_xstat_name *xstats_names,
282 int hn_vf_xstats_get(struct rte_eth_dev *dev,
283 struct rte_eth_xstat *xstats,
284 unsigned int offset, unsigned int n);
285 int hn_vf_xstats_reset(struct rte_eth_dev *dev);
/* RSS configuration propagated to the VF. */
286 int hn_vf_rss_hash_update(struct rte_eth_dev *dev,
287 struct rte_eth_rss_conf *rss_conf);
288 int hn_vf_reta_hash_update(struct rte_eth_dev *dev,
289 struct rte_eth_rss_reta_entry64 *reta_conf,
/* Callback for the VF port's removal (RMV) event. */
291 int hn_eth_rmv_event_callback(uint16_t port_id,
292 enum rte_eth_event_type event __rte_unused,
293 void *cb_arg, void *out __rte_unused);