2 * This file is provided under a dual BSD/LGPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
5 * GNU LESSER GENERAL PUBLIC LICENSE
7 * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
8 * Copyright(c) 2014-2017 Wind River Systems, Inc. All rights reserved.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2.1 of the GNU Lesser General Public License
12 * as published by the Free Software Foundation.
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * Contact Information:
20 * Wind River Systems, Inc.
25 * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
26 * Copyright(c) 2014-2017 Wind River Systems, Inc. All rights reserved.
27 * All rights reserved.
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57 #ifndef _RTE_AVP_COMMON_H_
58 #define _RTE_AVP_COMMON_H_
65 #include <rte_common.h>
66 #include <rte_memory.h>
67 #include <rte_ether.h>
68 #include <rte_atomic.h>
76 * AVP name is part of network device name.
78 #define RTE_AVP_NAMESIZE 32
81 * AVP alias is a user-defined value used for lookups from secondary
82 * processes. Typically, this is a UUID.
84 #define RTE_AVP_ALIASSIZE 128
	RTE_AVP_REQ_UNKNOWN = 0,	/**< Invalid/unset request id */
	RTE_AVP_REQ_CHANGE_MTU,		/**< Change MTU; presumably paired with rte_avp_request.new_mtu */
	RTE_AVP_REQ_CFG_NETWORK_IF,	/**< Set interface state; presumably paired with rte_avp_request.if_up */
	RTE_AVP_REQ_CFG_DEVICE,		/**< Configure device; presumably paired with rte_avp_request.config */
	RTE_AVP_REQ_SHUTDOWN_DEVICE,	/**< Shut down the device */
98 /**@{ AVP device driver types */
99 #define RTE_AVP_DRIVER_TYPE_UNKNOWN 0
100 #define RTE_AVP_DRIVER_TYPE_DPDK 1
101 #define RTE_AVP_DRIVER_TYPE_KERNEL 2
102 #define RTE_AVP_DRIVER_TYPE_QEMU 3
105 /**@{ AVP device operational modes */
106 #define RTE_AVP_MODE_HOST 0 /**< AVP interface created in host */
107 #define RTE_AVP_MODE_GUEST 1 /**< AVP interface created for export to guest */
108 #define RTE_AVP_MODE_TRACE 2 /**< AVP interface created for packet tracing */
112 * Structure for AVP queue configuration query request/result
struct rte_avp_device_config {
	uint64_t device_id;	/**< Unique system identifier */
	uint32_t driver_type;	/**< Device Driver type (RTE_AVP_DRIVER_TYPE_*) */
	uint32_t driver_version;	/**< Device Driver version */
	uint32_t features;	/**< Negotiated features (RTE_AVP_FEATURE_* bits) */
	uint16_t num_tx_queues;	/**< Number of active transmit queues */
	uint16_t num_rx_queues;	/**< Number of active receive queues */
	uint8_t if_up;	/**< 1: interface up, 0: interface down */
	/* packed: this struct is exchanged between peers (see rte_avp_request),
	 * so the byte layout is part of the protocol — do not reorder fields. */
} __attribute__ ((__packed__));
125 * Structure for AVP request.
struct rte_avp_request {
	uint32_t req_id;	/**< Request id (one of RTE_AVP_REQ_*) */
	/*
	 * Request-specific arguments; presumably only the field matching
	 * req_id is meaningful for a given request.
	 * NOTE(review): some variants of this header carry new_mtu/if_up/
	 * config in an anonymous union — confirm the intended layout against
	 * the peer (host) driver before relying on offsets.
	 */
	uint32_t new_mtu;	/**< New MTU */
	uint8_t if_up;	/**< 1: interface up, 0: interface down */
	struct rte_avp_device_config config;	/**< Queue configuration */
	int32_t result;	/**< Result for processing request */
} __attribute__ ((__packed__));
139 * FIFO struct mapped in a shared memory. It describes a circular buffer FIFO
140 * Write and read should wrap around. FIFO is empty when write == read
141 * Writing should never overwrite the read position
struct rte_avp_fifo {
	/*
	 * The FIFO lives in a shared memory mapping (see comment above);
	 * write/read are volatile because the peer updates them through that
	 * mapping, so the compiler must re-load them on every access.
	 * NOTE(review): volatile alone provides no memory-ordering barriers —
	 * confirm the producer/consumer code adds the required fences.
	 */
	volatile unsigned int write;	/**< Next position to be written*/
	volatile unsigned int read;	/**< Next position to be read */
	unsigned int len;	/**< Circular buffer length */
	unsigned int elem_size;	/**< Pointer size - for 32/64 bit OS */
	void *volatile buffer[];	/**< The buffer contains mbuf pointers */
153 * AVP packet buffer header used to define the exchange of packet data.
struct rte_avp_desc {
	void *pkt_mbuf;	/**< Reference to packet mbuf */
	uint16_t ol_flags;	/**< Offload features (RTE_AVP_TX/RX_VLAN_PKT flags). */
	void *next;	/**< Reference to next buffer in chain */
	void *data;	/**< Start address of data in segment buffer. */
	uint16_t data_len;	/**< Amount of data in segment buffer. */
	uint8_t nb_segs;	/**< Number of segments */
	uint16_t pkt_len;	/**< Total pkt len: sum of all segment data_len. */
	uint16_t vlan_tci;	/**< VLAN Tag Control Identifier (CPU order). */
	/* packed fixes the exchanged byte layout; cache-line alignment
	 * presumably keeps descriptors from sharing lines — TODO confirm. */
} __attribute__ ((__aligned__(RTE_CACHE_LINE_SIZE), __packed__));
/**@{ AVP device features */
173 #define RTE_AVP_FEATURE_VLAN_OFFLOAD (1 << 0) /**< Emulated HW VLAN offload */
177 /**@{ Offload feature flags */
178 #define RTE_AVP_TX_VLAN_PKT 0x0001 /**< TX packet is a 802.1q VLAN packet. */
179 #define RTE_AVP_RX_VLAN_PKT 0x0800 /**< RX packet is a 802.1q VLAN packet. */
183 /**@{ AVP PCI identifiers */
184 #define RTE_AVP_PCI_VENDOR_ID 0x1af4
185 #define RTE_AVP_PCI_DEVICE_ID 0x1110
188 /**@{ AVP PCI subsystem identifiers */
189 #define RTE_AVP_PCI_SUB_VENDOR_ID RTE_AVP_PCI_VENDOR_ID
190 #define RTE_AVP_PCI_SUB_DEVICE_ID 0x1104
193 /**@{ AVP PCI BAR definitions */
194 #define RTE_AVP_PCI_MMIO_BAR 0
195 #define RTE_AVP_PCI_MSIX_BAR 1
196 #define RTE_AVP_PCI_MEMORY_BAR 2
197 #define RTE_AVP_PCI_MEMMAP_BAR 4
198 #define RTE_AVP_PCI_DEVICE_BAR 5
199 #define RTE_AVP_PCI_MAX_BAR 6
202 /**@{ AVP PCI BAR name definitions */
203 #define RTE_AVP_MMIO_BAR_NAME "avp-mmio"
204 #define RTE_AVP_MSIX_BAR_NAME "avp-msix"
205 #define RTE_AVP_MEMORY_BAR_NAME "avp-memory"
206 #define RTE_AVP_MEMMAP_BAR_NAME "avp-memmap"
207 #define RTE_AVP_DEVICE_BAR_NAME "avp-device"
210 /**@{ AVP PCI MSI-X vectors */
211 #define RTE_AVP_MIGRATION_MSIX_VECTOR 0 /**< Migration interrupts */
212 #define RTE_AVP_MAX_MSIX_VECTORS 1
/**@{ AVP Migration status/ack register values */
216 #define RTE_AVP_MIGRATION_NONE 0 /**< Migration never executed */
217 #define RTE_AVP_MIGRATION_DETACHED 1 /**< Device attached during migration */
218 #define RTE_AVP_MIGRATION_ATTACHED 2 /**< Device reattached during migration */
219 #define RTE_AVP_MIGRATION_ERROR 3 /**< Device failed to attach/detach */
/**@{ AVP MMIO Register Offsets */
223 #define RTE_AVP_REGISTER_BASE 0
224 #define RTE_AVP_INTERRUPT_MASK_OFFSET (RTE_AVP_REGISTER_BASE + 0)
225 #define RTE_AVP_INTERRUPT_STATUS_OFFSET (RTE_AVP_REGISTER_BASE + 4)
226 #define RTE_AVP_MIGRATION_STATUS_OFFSET (RTE_AVP_REGISTER_BASE + 8)
227 #define RTE_AVP_MIGRATION_ACK_OFFSET (RTE_AVP_REGISTER_BASE + 12)
/**@{ AVP Interrupt Status Mask */
231 #define RTE_AVP_MIGRATION_INTERRUPT_MASK (1 << 1)
232 #define RTE_AVP_APP_INTERRUPTS_MASK 0xFFFFFFFF
233 #define RTE_AVP_NO_INTERRUPTS_MASK 0
237 * Maximum number of memory regions to export
239 #define RTE_AVP_MAX_MAPS 2048
242 * Description of a single memory region
struct rte_avp_memmap {
	rte_iova_t phys_addr;	/**< IO/physical start address of the exported region */
251 * AVP memory mapping validation marker
253 #define RTE_AVP_MEMMAP_MAGIC 0x20131969
255 /**@{ AVP memory map versions */
256 #define RTE_AVP_MEMMAP_VERSION_1 1
257 #define RTE_AVP_MEMMAP_VERSION RTE_AVP_MEMMAP_VERSION_1
261 * Defines a list of memory regions exported from the host to the guest
struct rte_avp_memmap_info {
	uint32_t magic;	/**< Memory validation marker (RTE_AVP_MEMMAP_MAGIC) */
	uint32_t version;	/**< Data format version (RTE_AVP_MEMMAP_VERSION) */
	struct rte_avp_memmap maps[RTE_AVP_MAX_MAPS];	/**< Exported memory regions */
271 * AVP device memory validation marker
273 #define RTE_AVP_DEVICE_MAGIC 0x20131975
/**@{ AVP device map versions
 * WARNING: do not change the format or names of these variables. They are
 * automatically parsed from the build system to generate the SDK package
 */
280 #define RTE_AVP_RELEASE_VERSION_1 1
281 #define RTE_AVP_RELEASE_VERSION RTE_AVP_RELEASE_VERSION_1
282 #define RTE_AVP_MAJOR_VERSION_0 0
283 #define RTE_AVP_MAJOR_VERSION_1 1
284 #define RTE_AVP_MAJOR_VERSION_2 2
285 #define RTE_AVP_MAJOR_VERSION RTE_AVP_MAJOR_VERSION_2
286 #define RTE_AVP_MINOR_VERSION_0 0
287 #define RTE_AVP_MINOR_VERSION_1 1
288 #define RTE_AVP_MINOR_VERSION_13 13
289 #define RTE_AVP_MINOR_VERSION RTE_AVP_MINOR_VERSION_13
294 * Generates a 32-bit version number from the specified version number
297 #define RTE_AVP_MAKE_VERSION(_release, _major, _minor) \
298 ((((_release) & 0xffff) << 16) | (((_major) & 0xff) << 8) | ((_minor) & 0xff))
302 * Represents the current version of the AVP host driver
303 * WARNING: in the current development branch the host and guest driver
304 * version should always be the same. When patching guest features back to
305 * GA releases the host version number should not be updated unless there was
306 * an actual change made to the host driver.
308 #define RTE_AVP_CURRENT_HOST_VERSION \
309 RTE_AVP_MAKE_VERSION(RTE_AVP_RELEASE_VERSION_1, \
310 RTE_AVP_MAJOR_VERSION_0, \
311 RTE_AVP_MINOR_VERSION_1)
315 * Represents the current version of the AVP guest drivers
317 #define RTE_AVP_CURRENT_GUEST_VERSION \
318 RTE_AVP_MAKE_VERSION(RTE_AVP_RELEASE_VERSION_1, \
319 RTE_AVP_MAJOR_VERSION_2, \
320 RTE_AVP_MINOR_VERSION_13)
323 * Access AVP device version values
325 #define RTE_AVP_GET_RELEASE_VERSION(_version) (((_version) >> 16) & 0xffff)
326 #define RTE_AVP_GET_MAJOR_VERSION(_version) (((_version) >> 8) & 0xff)
327 #define RTE_AVP_GET_MINOR_VERSION(_version) ((_version) & 0xff)
332 * Remove the minor version number so that only the release and major versions
333 * are used for comparisons.
335 #define RTE_AVP_STRIP_MINOR_VERSION(_version) ((_version) >> 8)
339 * Defines the number of mbuf pools supported per device (1 per socket)
341 #define RTE_AVP_MAX_MEMPOOLS 8
344 * Defines address translation parameters for each support mbuf pool
struct rte_avp_mempool_info {
	rte_iova_t phys_addr;	/**< IO/physical address of the mbuf pool */
353 * Struct used to create a AVP device. Passed to the kernel in IOCTL call or
354 * via inter-VM shared memory when used in a guest.
struct rte_avp_device_info {
	uint32_t magic;	/**< Memory validation marker (RTE_AVP_DEVICE_MAGIC) */
	uint32_t version;	/**< Data format version */
	char ifname[RTE_AVP_NAMESIZE];	/**< Network device name for AVP */
	rte_iova_t alloc_phys;	/**< IO address of the alloc queue(s) — TODO confirm */
	rte_iova_t free_phys;	/**< IO address of the free queue(s) — TODO confirm */
	uint32_t features;	/**< Supported feature bitmap */
	uint8_t min_rx_queues;	/**< Minimum supported receive/free queues */
	uint8_t num_rx_queues;	/**< Recommended number of receive/free queues */
	uint8_t max_rx_queues;	/**< Maximum supported receive/free queues */
	uint8_t min_tx_queues;	/**< Minimum supported transmit/alloc queues */
	uint8_t num_tx_queues;
	/**< Recommended number of transmit/alloc queues */
	uint8_t max_tx_queues;	/**< Maximum supported transmit/alloc queues */
	uint32_t tx_size;	/**< Size of each transmit queue */
	uint32_t rx_size;	/**< Size of each receive queue */
	uint32_t alloc_size;	/**< Size of each alloc queue */
	uint32_t free_size;	/**< Size of each free queue */
	/* Used by Ethtool */
	rte_iova_t resp_phys;	/**< presumably the response area IO address — verify */
	rte_iova_t sync_phys;	/**< presumably the sync area IO address — verify */
	/* mbuf mempool (used when a single memory area is supported) */
	rte_iova_t mbuf_phys;	/**< IO address of the single mbuf memory area */
	struct rte_avp_mempool_info pool[RTE_AVP_MAX_MEMPOOLS];	/**< Per-socket mbuf pools */
	/*
	 * NOTE(review): two ethaddr declarations with different length macros
	 * (ETH_ALEN vs ETHER_ADDR_LEN) cannot both compile in one build; they
	 * look like kernel/DPDK alternatives originally selected by an #ifdef
	 * (e.g. __KERNEL__) that was lost — confirm and restore the guard.
	 */
	char ethaddr[ETH_ALEN];	/**< Ethernet MAC address (kernel-side variant?) */
	char ethaddr[ETHER_ADDR_LEN];	/**< Ethernet MAC address (DPDK-side variant?) */
	uint8_t mode;	/**< device mode, i.e guest, host, trace (RTE_AVP_MODE_*) */
	unsigned int mbuf_size;	/**< presumably bytes per mbuf — TODO confirm units */
	/*
	 * unique id to differentiate between two instantiations of the same
	 * AVP device (i.e., the guest needs to know if the device has been
	 * deleted and recreated).
	 * NOTE(review): this comment appears to describe a device-id field
	 * that is not visible here — confirm whether a field was dropped.
	 */
	uint32_t max_rx_pkt_len;	/**< Maximum receive unit size */
416 #define RTE_AVP_MAX_QUEUES 8 /**< Maximum number of queues per device */
418 /** Maximum number of chained mbufs in a packet */
419 #define RTE_AVP_MAX_MBUF_SEGMENTS 5
421 #define RTE_AVP_DEVICE "avp"
423 #define RTE_AVP_IOCTL_TEST _IOWR(0, 1, int)
424 #define RTE_AVP_IOCTL_CREATE _IOWR(0, 2, struct rte_avp_device_info)
425 #define RTE_AVP_IOCTL_RELEASE _IOWR(0, 3, struct rte_avp_device_info)
426 #define RTE_AVP_IOCTL_QUERY _IOWR(0, 4, struct rte_avp_device_config)
432 #endif /* _RTE_AVP_COMMON_H_ */