* - The driver-oriented Ethernet API that exports functions allowing
* an Ethernet Poll Mode Driver (PMD) to allocate an Ethernet device instance,
* create memzone for HW rings and process registered callbacks, and so on.
- * PMDs should include rte_ethdev_driver.h instead of this header.
+ * PMDs should include ethdev_driver.h instead of this header.
*
* By default, all the functions of the Ethernet Device API exported by a PMD
* are lock-free functions which assume to not be invoked in parallel on
/* Use this macro to check if LRO API is supported */
#define RTE_ETHDEV_HAS_LRO_SUPPORT
+/* Alias RTE_LIBRTE_ETHDEV_DEBUG for backward compatibility. */
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+#define RTE_ETHDEV_DEBUG_RX
+#define RTE_ETHDEV_DEBUG_TX
+#endif
+
#include <rte_compat.h>
#include <rte_log.h>
#include <rte_interrupts.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_ether.h>
+#include <rte_power_intrinsics.h>
#include "rte_ethdev_trace_fp.h"
#include "rte_dev_info.h"
* Not all statistics fields in struct rte_eth_stats are supported
* by any type of network interface card (NIC). If any statistics
* field is not supported, its value is 0.
+ * All byte-related statistics do not include Ethernet FCS regardless
+ * of whether these bytes have been delivered to the application
+ * (see DEV_RX_OFFLOAD_KEEP_CRC).
*/
struct rte_eth_stats {
uint64_t ipackets; /**< Total number of successfully received packets. */
uint64_t ierrors; /**< Total number of erroneous received packets. */
uint64_t oerrors; /**< Total number of failed transmitted packets. */
uint64_t rx_nombuf; /**< Total number of RX mbuf allocation failures. */
+ /* Queue stats are limited to max 256 queues */
uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
/**< Total number of queue RX packets. */
uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
#define ETH_RSS_L2TPV3 (1ULL << 29)
#define ETH_RSS_PFCP (1ULL << 30)
#define ETH_RSS_PPPOE (1ULL << 31)
+#define ETH_RSS_ECPRI (1ULL << 32)
+#define ETH_RSS_MPLS (1ULL << 33)
/*
* We use the following macros to combine with above ETH_RSS_* for
ETH_RSS_PORT | \
ETH_RSS_VXLAN | \
ETH_RSS_GENEVE | \
- ETH_RSS_NVGRE)
+ ETH_RSS_NVGRE | \
+ ETH_RSS_MPLS)
/*
* Definitions used for redirection table entry size.
void *reserved_ptrs[2]; /**< Reserved for future fields */
};
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice.
+ *
+ * A structure used to configure an Rx packet segment to split.
+ *
+ * If RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT flag is set in offloads field,
+ * the PMD will split the received packets into multiple segments
+ * according to the specification in the description array:
+ *
+ * - The first network buffer will be allocated from the memory pool,
+ * specified in the first array element, the second buffer, from the
+ * pool in the second element, and so on.
+ *
+ * - The offsets from the segment description elements specify
+ * the data offset from the buffer beginning except the first mbuf.
+ * The first segment offset is added with RTE_PKTMBUF_HEADROOM.
+ *
+ * - The lengths in the elements define the maximal data amount
+ * being received to each segment. The receiving starts with filling
+ * up the first mbuf data buffer up to the specified length. If
+ * there is data remaining (packet is longer than the buffer in the first
+ * mbuf) the following data will be pushed to the next segment
+ * up to its own length, and so on.
+ *
+ * - If the length in the segment description element is zero
+ * the actual buffer size will be deduced from the appropriate
+ * memory pool properties.
+ *
+ * - If there are not enough elements to describe the buffer for the entire
+ * packet of maximal length, the following parameters will be used
+ * for the all remaining segments:
+ * - pool from the last valid element
+ * - the buffer size from this pool
+ * - zero offset
+ */
+struct rte_eth_rxseg_split {
+ struct rte_mempool *mp; /**< Memory pool to allocate segment from. */
+ uint16_t length; /**< Segment data length, configures the split point; 0 means deduce from pool buffer size. */
+ uint16_t offset; /**< Data offset from beginning of mbuf data buffer; RTE_PKTMBUF_HEADROOM is added for the first segment. */
+ uint32_t reserved; /**< Reserved field, for future use. */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice.
+ *
+ * A common structure used to describe Rx packet segment properties.
+ */
+union rte_eth_rxseg {
+ /** The settings for the buffer split offload. */
+ struct rte_eth_rxseg_split split;
+ /* Settings for other Rx segmentation features should be added here. */
+};
+
/**
* A structure used to configure an RX ring of an Ethernet port.
*/
uint16_t rx_free_thresh; /**< Drives the freeing of RX descriptors. */
uint8_t rx_drop_en; /**< Drop packets if no descriptors are available. */
uint8_t rx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
+ uint16_t rx_nseg; /**< Number of descriptions in rx_seg array. */
/**
* Per-queue Rx offloads to be set using DEV_RX_OFFLOAD_* flags.
* Only offloads set on rx_queue_offload_capa or rx_offload_capa
* fields on rte_eth_dev_info structure are allowed to be set.
*/
uint64_t offloads;
+ /**
+ * Points to the array of segment descriptions for an entire packet.
+ * Array elements are properties for consecutive Rx segments.
+ *
+ * The supported capabilities of receiving segmentation is reported
+ * in rte_eth_dev_info.rx_seg_capa field.
+ */
+ union rte_eth_rxseg *rx_seg;
uint64_t reserved_64s[2]; /**< Reserved for future fields */
void *reserved_ptrs[2]; /**< Reserved for future fields */
* A structure used to configure hairpin binding.
*/
struct rte_eth_hairpin_conf {
- uint16_t peer_count; /**< The number of peers. */
+ uint32_t peer_count:16; /**< The number of peers. */
+
+ /**
+ * Explicit Tx flow rule mode.
+ * One hairpin pair of queues should have the same attribute.
+ *
+ * - When set, the user should be responsible for inserting the hairpin
+ * Tx part flows and removing them.
+ * - When clear, the PMD will try to handle the Tx part of the flows,
+ * e.g., by splitting one flow into two parts.
+ */
+ uint32_t tx_explicit:1;
+
+ /**
+ * Manually bind hairpin queues.
+ * One hairpin pair of queues should have the same attribute.
+ *
+ * - When set, to enable hairpin, the user should call the hairpin bind
+ * function after all the queues are set up properly and the ports are
+ * started. Also, the hairpin unbind function should be called
+ * accordingly before stopping a port that has hairpin configured.
+ * - When clear, the PMD will try to enable the hairpin with the queues
+ * configured automatically during port start.
+ */
+ uint32_t manual_bind:1;
+ uint32_t reserved:14; /**< Reserved bits. */
struct rte_eth_hairpin_peer peers[RTE_ETH_MAX_HAIRPIN_PEERS];
};
};
/**
- * Tunneled type.
+ * Tunnel type for device-specific classifier configuration.
+ * @see rte_eth_udp_tunnel
*/
enum rte_eth_tunnel_type {
RTE_TUNNEL_TYPE_NONE = 0,
RTE_TUNNEL_TYPE_IP_IN_GRE,
RTE_L2_TUNNEL_TYPE_E_TAG,
RTE_TUNNEL_TYPE_VXLAN_GPE,
+ RTE_TUNNEL_TYPE_ECPRI,
RTE_TUNNEL_TYPE_MAX,
};
/**
* UDP tunneling configuration.
- * Used to config the UDP port for a type of tunnel.
- * NICs need the UDP port to identify the tunnel type.
- * Normally a type of tunnel has a default UDP port, this structure can be used
- * in case if the users want to change or support more UDP port.
+ *
+ * Used to configure the classifier of a device,
+ * associating a UDP port with a type of tunnel.
+ *
+ * Some NICs may need such configuration to properly parse a tunnel
+ * with any standard or custom UDP port.
*/
struct rte_eth_udp_tunnel {
uint16_t udp_port; /**< UDP port used for the tunnel. */
- uint8_t prot_type; /**< Tunnel type. Defined in rte_eth_tunnel_type. */
+ uint8_t prot_type; /**< Tunnel type. @see rte_eth_tunnel_type */
};
/**
#define DEV_RX_OFFLOAD_VLAN_EXTEND 0x00000400
#define DEV_RX_OFFLOAD_JUMBO_FRAME 0x00000800
#define DEV_RX_OFFLOAD_SCATTER 0x00002000
+/**
+ * Timestamp is set by the driver in RTE_MBUF_DYNFIELD_TIMESTAMP_NAME
+ * and RTE_MBUF_DYNFLAG_RX_TIMESTAMP_NAME is set in ol_flags.
+ * The mbuf field and flag are registered when the offload is configured.
+ */
#define DEV_RX_OFFLOAD_TIMESTAMP 0x00004000
#define DEV_RX_OFFLOAD_SECURITY 0x00008000
#define DEV_RX_OFFLOAD_KEEP_CRC 0x00010000
#define DEV_RX_OFFLOAD_SCTP_CKSUM 0x00020000
#define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM 0x00040000
#define DEV_RX_OFFLOAD_RSS_HASH 0x00080000
+#define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT 0x00100000
#define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
DEV_RX_OFFLOAD_UDP_CKSUM | \
#define DEV_TX_OFFLOAD_IP_TNL_TSO 0x00080000
/** Device supports outer UDP checksum */
#define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM 0x00100000
-
-/** Device supports send on timestamp */
+/**
+ * Device sends on time read from RTE_MBUF_DYNFIELD_TIMESTAMP_NAME
+ * if RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME is set in ol_flags.
+ * The mbuf field and flag are registered when the offload is configured.
+ */
#define DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP 0x00200000
-
-
-#define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP 0x00000001
-/**< Device supports Rx queue setup after device started*/
-#define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP 0x00000002
-/**< Device supports Tx queue setup after device started*/
-
/*
* If new Tx offload capabilities are defined, they also must be
* mentioned in rte_tx_offload_names in rte_ethdev.c file.
*/
+/**@{@name Device capabilities
+ * Non-offload capabilities reported in rte_eth_dev_info.dev_capa.
+ */
+/** Device supports Rx queue setup after device started. */
+#define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP 0x00000001
+/** Device supports Tx queue setup after device started. */
+#define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP 0x00000002
+/**@}*/
+
/*
* Fallback default preferred Rx/Tx port parameters.
* These are used if an application requests default parameters
*/
};
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice.
+ *
+ * Ethernet device Rx buffer segmentation capabilities.
+ */
+struct rte_eth_rxseg_capa {
+ __extension__
+ uint32_t multi_pools:1; /**< Supports receiving to multiple pools. */
+ uint32_t offset_allowed:1; /**< Supports buffer offsets. */
+ uint32_t offset_align_log2:4; /**< Required offset alignment (log2 of bytes). */
+ uint16_t max_nseg; /**< Maximum amount of segments to split. */
+ uint16_t reserved; /**< Reserved field. */
+};
+
/**
* Ethernet device information
*/
+/**
+ * Ethernet device representor port type.
+ */
+enum rte_eth_representor_type {
+ RTE_ETH_REPRESENTOR_NONE, /**< Not a representor. */
+ RTE_ETH_REPRESENTOR_VF, /**< Representor of a Virtual Function. */
+ RTE_ETH_REPRESENTOR_SF, /**< Representor of a Sub Function. */
+ RTE_ETH_REPRESENTOR_PF, /**< Representor of a Physical Function. */
+};
+
/**
* A structure used to retrieve the contextual information of
* an Ethernet device, such as the controlling driver of the
/** Maximum number of hash MAC addresses for MTA and UTA. */
uint16_t max_vfs; /**< Maximum number of VFs. */
uint16_t max_vmdq_pools; /**< Maximum number of VMDq pools. */
+ struct rte_eth_rxseg_capa rx_seg_capa; /**< Segmentation capability.*/
uint64_t rx_offload_capa;
/**< All RX offload capabilities including all per-queue ones */
uint64_t tx_offload_capa;
#define RTE_ETH_DEV_REPRESENTOR 0x0010
/** Device does not support MAC change after started */
#define RTE_ETH_DEV_NOLIVE_MAC_ADDR 0x0020
+/**
+ * Queue xstats filled automatically by ethdev layer.
+ * PMDs filling the queue xstats themselves should not set this flag
+ */
+#define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS 0x0040
/**
* Iterates over valid ethdev ports owned by a specific owner.
* No need to repeat any bit in rx_conf->offloads which has already been
* enabled in rte_eth_dev_configure() at port level. An offloading enabled
* at port level can't be disabled at queue level.
+ * The configuration structure also contains the pointer to the array
+ * of the receiving buffer segment descriptions, see rx_seg and rx_nseg
+ * fields, this extended configuration might be used by split offloads like
+ * RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT. If mb_pool is not NULL,
+ * the extended configuration fields must be set to NULL and zero.
* @param mb_pool
* The pointer to the memory pool from which to allocate *rte_mbuf* network
- * memory buffers to populate each descriptor of the receive ring.
+ * memory buffers to populate each descriptor of the receive ring. There are
+ * two options to provide Rx buffer configuration:
+ * - single pool:
+ * mb_pool is not NULL, rx_conf.rx_nseg is 0.
+ * - multiple segments description:
+ * mb_pool is NULL, rx_conf.rx_seg is not NULL, rx_conf.rx_nseg is not 0.
+ * Taken only if flag RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT is set in offloads.
+ *
* @return
* - 0: Success, receive queue correctly set up.
* - -EIO: if device is removed.
+ * - -ENODEV: if *port_id* is invalid.
* - -EINVAL: The memory pool pointer is null or the size of network buffers
* which can be allocated from this memory pool does not fit the various
* buffer sizes allowed by the device controller.
*
* @return
* - (0) if successful.
+ * - (-ENODEV) if *port_id* is invalid.
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
* - (-ENOMEM) if unable to allocate the resources.
*
* @return
* - (0) if successful.
+ * - (-ENODEV) if *port_id* is invalid.
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
* - (-ENOMEM) if unable to allocate the resources.
(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc,
const struct rte_eth_hairpin_conf *conf);
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * Get all the hairpin peer Rx / Tx ports of the current port.
+ * The caller should ensure that the array is large enough to save the ports
+ * list.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param peer_ports
+ * Pointer to the array to store the peer ports list.
+ * @param len
+ * Length of the array to store the port identifiers.
+ * @param direction
+ * Current port to peer port direction
+ * positive - current used as Tx to get all peer Rx ports.
+ * zero - current used as Rx to get all peer Tx ports.
+ *
+ * @return
+ * - (0 or positive) actual peer ports number.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENODEV) if *port_id* invalid
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - Others detailed errors from PMD drivers.
+ */
+__rte_experimental
+int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
+ size_t len, uint32_t direction);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * Bind all hairpin Tx queues of one port to the Rx queues of the peer port.
+ * It is only allowed to call this function after all hairpin queues are
+ * configured properly and the devices are in started state.
+ *
+ * @param tx_port
+ * The identifier of the Tx port.
+ * @param rx_port
+ * The identifier of peer Rx port.
+ * RTE_MAX_ETHPORTS is allowed for the traversal of all devices.
+ * Rx port ID could have the same value as Tx port ID.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if Tx port ID is invalid.
+ * - (-EBUSY) if device is not in started state.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - Others detailed errors from PMD drivers.
+ */
+__rte_experimental
+int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * Unbind all hairpin Tx queues of one port from the Rx queues of the peer port.
+ * This should be called before closing the Tx or Rx devices, if the bind
+ * function is called before.
+ * After unbinding the hairpin ports pair, it is allowed to bind them again.
+ * Changing queues configuration should be after stopping the device(s).
+ *
+ * @param tx_port
+ * The identifier of the Tx port.
+ * @param rx_port
+ * The identifier of peer Rx port.
+ * RTE_MAX_ETHPORTS is allowed for traversal of all devices.
+ * Rx port ID could have the same value as Tx port ID.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if Tx port ID is invalid.
+ * - (-EBUSY) if device is in stopped state.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - Others detailed errors from PMD drivers.
+ */
+__rte_experimental
+int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port);
+
/**
* Return the NUMA socket to which an Ethernet device is connected
*
* to rte_eth_dev_configure().
* @return
* - 0: Success, the receive queue is started.
- * - -EINVAL: The port_id or the queue_id out of range or belong to hairpin.
+ * - -ENODEV: if *port_id* is invalid.
+ * - -EINVAL: The queue_id out of range or belong to hairpin.
* - -EIO: if device is removed.
* - -ENOTSUP: The function not supported in PMD driver.
*/
* to rte_eth_dev_configure().
* @return
* - 0: Success, the receive queue is stopped.
- * - -EINVAL: The port_id or the queue_id out of range or belong to hairpin.
+ * - -ENODEV: if *port_id* is invalid.
+ * - -EINVAL: The queue_id out of range or belong to hairpin.
* - -EIO: if device is removed.
* - -ENOTSUP: The function not supported in PMD driver.
*/
* to rte_eth_dev_configure().
* @return
* - 0: Success, the transmit queue is started.
- * - -EINVAL: The port_id or the queue_id out of range or belong to hairpin.
+ * - -ENODEV: if *port_id* is invalid.
+ * - -EINVAL: The queue_id out of range or belong to hairpin.
* - -EIO: if device is removed.
* - -ENOTSUP: The function not supported in PMD driver.
*/
* to rte_eth_dev_configure().
* @return
* - 0: Success, the transmit queue is stopped.
- * - -EINVAL: The port_id or the queue_id out of range or belong to hairpin.
+ * - -ENODEV: if *port_id* is invalid.
+ * - -EINVAL: The queue_id out of range or belong to hairpin.
* - -EIO: if device is removed.
* - -ENOTSUP: The function not supported in PMD driver.
*/
*
* @param port_id
* The port identifier of the Ethernet device.
+ * @return
+ * - 0: Success, Ethernet device stopped.
+ * - <0: Error code of the driver device stop function.
*/
-void rte_eth_dev_stop(uint16_t port_id);
+int rte_eth_dev_stop(uint16_t port_id);
/**
* Link up an Ethernet device.
*
* @param port_id
* The port identifier of the Ethernet device.
+ * @return
+ * - Zero if the port is closed successfully.
+ * - Negative if something went wrong.
*/
-void rte_eth_dev_close(uint16_t port_id);
+int rte_eth_dev_close(uint16_t port_id);
/**
* Reset a Ethernet device and keep its port id.
*
* @return
* - (0) if successful.
- * - (-EINVAL) if port identifier is invalid.
+ * - (-ENODEV) if *port_id* is invalid.
* - (-ENOTSUP) if hardware doesn't support this function.
* - (-EPERM) if not ran from the primary process.
* - (-EIO) if re-initialisation failed or device is removed.
* The per-queue packet statistics functionality number that the transmit
* queue is to be assigned.
* The value must be in the range [0, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1].
+ * Max RTE_ETHDEV_QUEUE_STAT_CNTRS being 256.
* @return
* Zero if successful. Non-zero otherwise.
*/
* The per-queue packet statistics functionality number that the receive
* queue is to be assigned.
* The value must be in the range [0, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1].
+ * Max RTE_ETHDEV_QUEUE_STAT_CNTRS being 256.
* @return
* Zero if successful. Non-zero otherwise.
*/
* rte_eth_dev_info_get().
* @return
* - (0) if successful.
+ * - (-ENODEV) if *port_id* is invalid.
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
* - (-EIO) if device is removed.
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size);
- /**
+/**
* Query Redirection Table(RETA) of Receive Side Scaling of Ethernet device.
*
* @param port_id
* rte_eth_dev_info_get().
* @return
* - (0) if successful.
+ * - (-ENODEV) if *port_id* is invalid.
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
* - (-EIO) if device is removed.
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size);
- /**
+/**
* Updates unicast hash table for receiving packet with the given destination
* MAC address, and the packet is routed to all VFs for which the RX mode is
* accept packets that match the unicast hash table.
int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
uint8_t on);
- /**
+/**
* Updates all unicast hash bitmaps for receiving packet with any Unicast
* Ethernet MAC addresses,the packet is routed to all VFs for which the RX
* mode is accept packets that match the unicast hash table.
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
uint16_t tx_rate);
- /**
+/**
* Configuration of Receive Side Scaling hash computation of Ethernet device.
*
* @param port_id
int rte_eth_dev_rss_hash_update(uint16_t port_id,
struct rte_eth_rss_conf *rss_conf);
- /**
+/**
* Retrieve current configuration of Receive Side Scaling hash computation
* of Ethernet device.
*
rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
struct rte_eth_rss_conf *rss_conf);
- /**
- * Add UDP tunneling port for a specific type of tunnel.
- * The packets with this UDP port will be identified as this type of tunnel.
- * Before enabling any offloading function for a tunnel, users can call this API
- * to change or add more UDP port for the tunnel. So the offloading function
- * can take effect on the packets with the specific UDP port.
+/**
+ * Add UDP tunneling port for a type of tunnel.
+ *
+ * Some NICs may require such configuration to properly parse a tunnel
+ * with any standard or custom UDP port.
+ * The packets with this UDP port will be parsed for this type of tunnel.
+ * The device parser will also check the rest of the tunnel headers
+ * before classifying the packet.
+ *
+ * With some devices, this API will affect packet classification, i.e.:
+ * - mbuf.packet_type reported on Rx
+ * - rte_flow rules with tunnel items
*
* @param port_id
* The port identifier of the Ethernet device.
rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
struct rte_eth_udp_tunnel *tunnel_udp);
- /**
- * Delete UDP tunneling port a specific type of tunnel.
- * The packets with this UDP port will not be identified as this type of tunnel
- * any more.
- * Before enabling any offloading function for a tunnel, users can call this API
- * to delete a UDP port for the tunnel. So the offloading function will not take
- * effect on the packets with the specific UDP port.
+/**
+ * Delete UDP tunneling port for a type of tunnel.
+ *
+ * The packets with this UDP port will not be classified as this type of tunnel
+ * anymore if the device use such mapping for tunnel packet classification.
+ *
+ * @see rte_eth_dev_udp_tunnel_port_add
*
* @param port_id
* The port identifier of the Ethernet device.
rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
struct rte_eth_udp_tunnel *tunnel_udp);
-/**
- * Check whether the filter type is supported on an Ethernet device.
- * All the supported filter types are defined in 'rte_eth_ctrl.h'.
- *
- * @param port_id
- * The port identifier of the Ethernet device.
- * @param filter_type
- * Filter type.
- * @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support this filter type.
- * - (-ENODEV) if *port_id* invalid.
- * - (-EIO) if device is removed.
- */
-__rte_deprecated
-int rte_eth_dev_filter_supported(uint16_t port_id,
- enum rte_filter_type filter_type);
-
-/**
- * Take operations to assigned filter type on an Ethernet device.
- * All the supported operations and filter types are defined in 'rte_eth_ctrl.h'.
- *
- * @param port_id
- * The port identifier of the Ethernet device.
- * @param filter_type
- * Filter type.
- * @param filter_op
- * Type of operation.
- * @param arg
- * A pointer to arguments defined specifically for the operation.
- * @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support.
- * - (-ENODEV) if *port_id* invalid.
- * - (-EIO) if device is removed.
- * - others depends on the specific operations implementation.
- */
-__rte_deprecated
-int rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
- enum rte_filter_op filter_op, void *arg);
-
/**
* Get DCB information on an Ethernet device.
*
*
* @return
* - 0: Success. Callback was removed.
+ * - -ENODEV: If *port_id* is invalid.
* - -ENOTSUP: Callback support is not available.
- * - -EINVAL: The port_id or the queue_id is out of range, or the callback
+ * - -EINVAL: The queue_id is out of range, or the callback
* is NULL or not found for the port/queue.
*/
int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
*
* @return
* - 0: Success. Callback was removed.
+ * - -ENODEV: If *port_id* is invalid.
* - -ENOTSUP: Callback support is not available.
- * - -EINVAL: The port_id or the queue_id is out of range, or the callback
+ * - -EINVAL: The queue_id is out of range, or the callback
* is NULL or not found for the port/queue.
*/
int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
*
* @return
* - 0: Success
+ * - -ENODEV: If *port_id* is invalid.
* - -ENOTSUP: routine is not supported by the device PMD.
- * - -EINVAL: The port_id or the queue_id is out of range, or the queue
+ * - -EINVAL: The queue_id is out of range, or the queue
* is hairpin queue.
*/
int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
*
* @return
* - 0: Success
+ * - -ENODEV: If *port_id* is invalid.
* - -ENOTSUP: routine is not supported by the device PMD.
- * - -EINVAL: The port_id or the queue_id is out of range, or the queue
+ * - -EINVAL: The queue_id is out of range, or the queue
* is hairpin queue.
*/
int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
*
* @return
* - 0: Success
+ * - -ENODEV: If *port_id* is invalid.
* - -ENOTSUP: routine is not supported by the device PMD.
- * - -EINVAL: The port_id or the queue_id is out of range.
+ * - -EINVAL: The queue_id is out of range.
*/
__rte_experimental
int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
*
* @return
* - 0: Success
+ * - -ENODEV: If *port_id* is invalid.
* - -ENOTSUP: routine is not supported by the device PMD.
- * - -EINVAL: The port_id or the queue_id is out of range.
+ * - -EINVAL: The queue_id is out of range.
*/
__rte_experimental
int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
struct rte_eth_burst_mode *mode);
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve the monitor condition for a given receive queue.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param queue_id
+ * The Rx queue on the Ethernet device for which information
+ * will be retrieved.
+ * @param pmc
+ * The pointer to power-optimized monitoring condition structure.
+ *
+ * @return
+ * - 0: Success.
+ * - -ENOTSUP: Operation not supported.
+ * - -EINVAL: Invalid parameters.
+ * - -ENODEV: Invalid port ID.
+ */
+__rte_experimental
+int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
+ struct rte_power_monitor_cond *pmc);
+
/**
* Retrieve device registers and register attributes (number of registers and
* register size)
* @return
* - (0) if successful.
* - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
* - (-ENODEV) if *port_id* invalid.
* - (-EIO) if device is removed.
* - others depends on the specific operations implementation.
* @return
* - (0) if successful.
* - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
* - (-ENODEV) if *port_id* invalid.
* - (-EIO) if device is removed.
* - others depends on the specific operations implementation.
* - (0) if successful.
* - (-ENOTSUP) if hardware doesn't support.
* - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if bad parameter.
* - (-EIO) if device is removed.
* - others depends on the specific operations implementation.
*/
* - (0) if successful.
* - (-ENOTSUP) if hardware doesn't support.
* - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if bad parameter.
* - (-EIO) if device is removed.
* - others depends on the specific operations implementation.
*/
* @return
* - (0) if successful.
* - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
* - (-ENODEV) if *port_id* invalid.
* - (-EIO) if device is removed.
* - others depends on the specific operations implementation.
* rte_eth_read_clock(port, base_clock);
*
* Then, convert the raw mbuf timestamp with:
- * base_time_sec + (double)(mbuf->timestamp - base_clock) / freq;
+ * base_time_sec + (double)(*timestamp_dynfield(mbuf) - base_clock) / freq;
*
* This simple example will not provide a very good accuracy. One must
* at least measure multiple times the frequency and do a regression.
int
rte_eth_read_clock(uint16_t port_id, uint64_t *clock);
-/**
- * Config l2 tunnel ether type of an Ethernet device for filtering specific
- * tunnel packets by ether type.
- *
- * @param port_id
- * The port identifier of the Ethernet device.
- * @param l2_tunnel
- * l2 tunnel configuration.
- *
- * @return
- * - (0) if successful.
- * - (-ENODEV) if port identifier is invalid.
- * - (-EIO) if device is removed.
- * - (-ENOTSUP) if hardware doesn't support tunnel type.
- */
-int
-rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
- struct rte_eth_l2_tunnel_conf *l2_tunnel);
-
-/**
- * Enable/disable l2 tunnel offload functions. Include,
- * 1, The ability of parsing a type of l2 tunnel of an Ethernet device.
- * Filtering, forwarding and offloading this type of tunnel packets depend on
- * this ability.
- * 2, Stripping the l2 tunnel tag.
- * 3, Insertion of the l2 tunnel tag.
- * 4, Forwarding the packets based on the l2 tunnel tag.
- *
- * @param port_id
- * The port identifier of the Ethernet device.
- * @param l2_tunnel
- * l2 tunnel parameters.
- * @param mask
- * Indicate the offload function.
- * @param en
- * Enable or disable this function.
- *
- * @return
- * - (0) if successful.
- * - (-ENODEV) if port identifier is invalid.
- * - (-EIO) if device is removed.
- * - (-ENOTSUP) if hardware doesn't support tunnel type.
- */
-int
-rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
- struct rte_eth_l2_tunnel_conf *l2_tunnel,
- uint32_t mask,
- uint8_t en);
-
/**
* Get the port id from device name. The device name should be specified
* as below:
* Buffer of size RTE_ETH_NAME_MAX_LEN to store the name.
* @return
* - (0) if successful.
+* - (-ENODEV) if *port_id* is invalid.
* - (-EINVAL) on failure.
*/
int
int rte_eth_dev_hairpin_capability_get(uint16_t port_id,
struct rte_eth_hairpin_cap *cap);
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice.
+ *
+ * ethernet device representor ID range entry
+ */
+struct rte_eth_representor_range {
+ enum rte_eth_representor_type type; /**< Representor type. */
+ int controller; /**< Controller index. */
+ int pf; /**< Physical function index. */
+ __extension__
+ union {
+ int vf; /**< VF start index. */
+ int sf; /**< SF start index. */
+ };
+ uint32_t id_base; /**< Representor ID start index. */
+ uint32_t id_end; /**< Representor ID end index. */
+ char name[RTE_DEV_NAME_MAX_LEN]; /**< Representor name. */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice.
+ *
+ * Ethernet device representor information
+ */
+struct rte_eth_representor_info {
+ uint16_t controller; /**< Controller ID of caller device. */
+ uint16_t pf; /**< Physical function ID of caller device. */
+ struct rte_eth_representor_range ranges[]; /**< Flexible array of representor ID ranges. */
+};
+
+/**
+ * Retrieve the representor info of the device.
+ *
+ * Get device representor info to be able to calculate a unique
+ * representor ID. @see rte_eth_representor_id_get helper.
+ *
+ * @param port_id
+ * The port identifier of the device.
+ * @param info
+ * A pointer to a representor info structure.
+ * NULL to return number of range entries and allocate memory
+ * for next call to store detail.
+ * @return
+ * - (-ENOTSUP) if operation is not supported.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EIO) if device is removed.
+ * - (>=0) number of representor range entries supported by device.
+ */
+__rte_experimental
+int rte_eth_representor_info_get(uint16_t port_id,
+ struct rte_eth_representor_info *info);
+
#include <rte_ethdev_core.h>
/**
* burst-oriented optimizations in both synchronous and asynchronous
* packet processing environments with no overhead in both cases.
*
+ * @note
+ * Some drivers using vector instructions require that *nb_pkts* is
+ * divisible by 4 or 8, depending on the driver implementation.
+ *
* The rte_eth_rx_burst() function does not provide any error
* notification to avoid the corresponding overhead. As a hint, the
* upper-level application might check the status of the device link once
* must be large enough to store *nb_pkts* pointers in it.
* @param nb_pkts
* The maximum number of packets to retrieve.
+ * The value must be divisible by 8 in order to work with any driver.
* @return
* The number of packets actually retrieved, which is the number
* of pointers to *rte_mbuf* structures effectively supplied to the
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
uint16_t nb_rx;
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+#ifdef RTE_ETHDEV_DEBUG_RX
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
* The queue id on the specific port.
* @return
* The number of used descriptors in the specific queue, or:
- * (-EINVAL) if *port_id* or *queue_id* is invalid
+ * - (-ENODEV) if *port_id* is invalid.
+ * (-EINVAL) if *queue_id* is invalid
* (-ENOTSUP) if the device does not support this function
*/
static inline int
{
struct rte_eth_dev *dev;
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_queue_count, -ENOTSUP);
if (queue_id >= dev->data->nb_rx_queues ||
struct rte_eth_dev *dev;
void *rxq;
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+#ifdef RTE_ETHDEV_DEBUG_RX
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
#endif
dev = &rte_eth_devices[port_id];
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+#ifdef RTE_ETHDEV_DEBUG_RX
if (queue_id >= dev->data->nb_rx_queues)
return -ENODEV;
#endif
struct rte_eth_dev *dev;
void *txq;
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+#ifdef RTE_ETHDEV_DEBUG_TX
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
#endif
dev = &rte_eth_devices[port_id];
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+#ifdef RTE_ETHDEV_DEBUG_TX
if (queue_id >= dev->data->nb_tx_queues)
return -ENODEV;
#endif
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+#ifdef RTE_ETHDEV_DEBUG_TX
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
* meet devices requirements with rte_errno set appropriately:
* - EINVAL: offload flags are not correctly set
* - ENOTSUP: the offload feature is not supported by the hardware
+ * - ENODEV: if *port_id* is invalid (with debug enabled only)
*
*/
{
struct rte_eth_dev *dev;
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+#ifdef RTE_ETHDEV_DEBUG_TX
if (!rte_eth_dev_is_valid_port(port_id)) {
RTE_ETHDEV_LOG(ERR, "Invalid TX port_id=%u\n", port_id);
- rte_errno = EINVAL;
+ rte_errno = ENODEV;
return 0;
}
#endif
dev = &rte_eth_devices[port_id];
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+#ifdef RTE_ETHDEV_DEBUG_TX
if (queue_id >= dev->data->nb_tx_queues) {
RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
rte_errno = EINVAL;