X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_ethdev%2Frte_ethdev.h;h=e341a08817e0a64326e14862e11de22708ddf874;hb=2ca75c65af4ca9c836ca0b4eabec69bde05fbed6;hp=e9e3a16995faa75075392e5c929592339588b11d;hpb=f35e5b3e07b2e7999f7d3085236cc366c9cb4da6;p=dpdk.git diff --git a/lib/librte_ethdev/rte_ethdev.h b/lib/librte_ethdev/rte_ethdev.h index e9e3a16995..e341a08817 100644 --- a/lib/librte_ethdev/rte_ethdev.h +++ b/lib/librte_ethdev/rte_ethdev.h @@ -158,6 +158,7 @@ extern "C" { #include #include +#include "rte_ethdev_trace_fp.h" #include "rte_dev_info.h" extern int rte_eth_dev_logtype; @@ -252,6 +253,7 @@ struct rte_eth_stats { uint64_t ierrors; /**< Total number of erroneous received packets. */ uint64_t oerrors; /**< Total number of failed transmitted packets. */ uint64_t rx_nombuf; /**< Total number of RX mbuf allocation failures. */ + /* Queue stats are limited to max 256 queues */ uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]; /**< Total number of queue RX packets. */ uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]; @@ -283,6 +285,7 @@ struct rte_eth_stats { #define ETH_LINK_SPEED_50G (1 << 12) /**< 50 Gbps */ #define ETH_LINK_SPEED_56G (1 << 13) /**< 56 Gbps */ #define ETH_LINK_SPEED_100G (1 << 14) /**< 100 Gbps */ +#define ETH_LINK_SPEED_200G (1 << 15) /**< 200 Gbps */ /** * Ethernet numeric link speeds in Mbps @@ -300,6 +303,8 @@ struct rte_eth_stats { #define ETH_SPEED_NUM_50G 50000 /**< 50 Gbps */ #define ETH_SPEED_NUM_56G 56000 /**< 56 Gbps */ #define ETH_SPEED_NUM_100G 100000 /**< 100 Gbps */ +#define ETH_SPEED_NUM_200G 200000 /**< 200 Gbps */ +#define ETH_SPEED_NUM_UNKNOWN UINT32_MAX /**< Unknown */ /** * A structure used to retrieve link-level information of an Ethernet port. @@ -319,6 +324,7 @@ struct rte_eth_link { #define ETH_LINK_UP 1 /**< Link is up (see link_status). */ #define ETH_LINK_FIXED 0 /**< No autonegotiation (see link_autoneg). */ #define ETH_LINK_AUTONEG 1 /**< Autonegotiated (see link_autoneg). */ +#define RTE_ETH_LINK_MAX_STR_LEN 40 /**< Max length of default link string. */ /** * A structure used to configure the ring threshold registers of an RX/TX @@ -511,6 +517,14 @@ struct rte_eth_rss_conf { #define ETH_RSS_GENEVE (1ULL << 20) #define ETH_RSS_NVGRE (1ULL << 21) #define ETH_RSS_GTPU (1ULL << 23) +#define ETH_RSS_ETH (1ULL << 24) +#define ETH_RSS_S_VLAN (1ULL << 25) +#define ETH_RSS_C_VLAN (1ULL << 26) +#define ETH_RSS_ESP (1ULL << 27) +#define ETH_RSS_AH (1ULL << 28) +#define ETH_RSS_L2TPV3 (1ULL << 29) +#define ETH_RSS_PFCP (1ULL << 30) +#define ETH_RSS_PPPOE (1ULL << 31) /* * We use the following macros to combine with above ETH_RSS_* for @@ -525,6 +539,51 @@ struct rte_eth_rss_conf { #define ETH_RSS_L3_DST_ONLY (1ULL << 62) #define ETH_RSS_L4_SRC_ONLY (1ULL << 61) #define ETH_RSS_L4_DST_ONLY (1ULL << 60) +#define ETH_RSS_L2_SRC_ONLY (1ULL << 59) +#define ETH_RSS_L2_DST_ONLY (1ULL << 58) + +/* + * Only select IPV6 address prefix as RSS input set according to + * https://tools.ietf.org/html/rfc6052 + * Must be combined with ETH_RSS_IPV6, ETH_RSS_NONFRAG_IPV6_UDP, + * ETH_RSS_NONFRAG_IPV6_TCP, ETH_RSS_NONFRAG_IPV6_SCTP. + */ +#define RTE_ETH_RSS_L3_PRE32 (1ULL << 57) +#define RTE_ETH_RSS_L3_PRE40 (1ULL << 56) +#define RTE_ETH_RSS_L3_PRE48 (1ULL << 55) +#define RTE_ETH_RSS_L3_PRE56 (1ULL << 54) +#define RTE_ETH_RSS_L3_PRE64 (1ULL << 53) +#define RTE_ETH_RSS_L3_PRE96 (1ULL << 52) + +/* + * Use the following macros to combine with the above layers + * to choose inner and outer layers or both for RSS computation. 
+ * Bits 50 and 51 are reserved for this. + */ + +/** + * level 0, requests the default behavior. + * Depending on the packet type, it can mean outermost, innermost, + * anything in between or even no RSS. + * It basically stands for the innermost encapsulation level RSS + * can be performed on according to PMD and device capabilities. + */ +#define ETH_RSS_LEVEL_PMD_DEFAULT (0ULL << 50) + +/** + * level 1, requests RSS to be performed on the outermost packet + * encapsulation level. + */ +#define ETH_RSS_LEVEL_OUTERMOST (1ULL << 50) + +/** + * level 2, requests RSS to be performed on the specified inner packet + * encapsulation level, from outermost to innermost (lower to higher values). + */ +#define ETH_RSS_LEVEL_INNERMOST (2ULL << 50) +#define ETH_RSS_LEVEL_MASK (3ULL << 50) + +#define ETH_RSS_LEVEL(rss_hf) ((rss_hf & ETH_RSS_LEVEL_MASK) >> 50) /** * For input set change of hash filter, if SRC_ONLY and DST_ONLY of @@ -548,6 +607,102 @@ rte_eth_rss_hf_refine(uint64_t rss_hf) return rss_hf; } +#define ETH_RSS_IPV6_PRE32 ( \ + ETH_RSS_IPV6 | \ + RTE_ETH_RSS_L3_PRE32) + +#define ETH_RSS_IPV6_PRE40 ( \ + ETH_RSS_IPV6 | \ + RTE_ETH_RSS_L3_PRE40) + +#define ETH_RSS_IPV6_PRE48 ( \ + ETH_RSS_IPV6 | \ + RTE_ETH_RSS_L3_PRE48) + +#define ETH_RSS_IPV6_PRE56 ( \ + ETH_RSS_IPV6 | \ + RTE_ETH_RSS_L3_PRE56) + +#define ETH_RSS_IPV6_PRE64 ( \ + ETH_RSS_IPV6 | \ + RTE_ETH_RSS_L3_PRE64) + +#define ETH_RSS_IPV6_PRE96 ( \ + ETH_RSS_IPV6 | \ + RTE_ETH_RSS_L3_PRE96) + +#define ETH_RSS_IPV6_PRE32_UDP ( \ + ETH_RSS_NONFRAG_IPV6_UDP | \ + RTE_ETH_RSS_L3_PRE32) + +#define ETH_RSS_IPV6_PRE40_UDP ( \ + ETH_RSS_NONFRAG_IPV6_UDP | \ + RTE_ETH_RSS_L3_PRE40) + +#define ETH_RSS_IPV6_PRE48_UDP ( \ + ETH_RSS_NONFRAG_IPV6_UDP | \ + RTE_ETH_RSS_L3_PRE48) + +#define ETH_RSS_IPV6_PRE56_UDP ( \ + ETH_RSS_NONFRAG_IPV6_UDP | \ + RTE_ETH_RSS_L3_PRE56) + +#define ETH_RSS_IPV6_PRE64_UDP ( \ + ETH_RSS_NONFRAG_IPV6_UDP | \ + RTE_ETH_RSS_L3_PRE64) + +#define ETH_RSS_IPV6_PRE96_UDP ( \ + ETH_RSS_NONFRAG_IPV6_UDP | \ + RTE_ETH_RSS_L3_PRE96) + +#define ETH_RSS_IPV6_PRE32_TCP ( \ + ETH_RSS_NONFRAG_IPV6_TCP | \ + RTE_ETH_RSS_L3_PRE32) + +#define ETH_RSS_IPV6_PRE40_TCP ( \ + ETH_RSS_NONFRAG_IPV6_TCP | \ + RTE_ETH_RSS_L3_PRE40) + +#define ETH_RSS_IPV6_PRE48_TCP ( \ + ETH_RSS_NONFRAG_IPV6_TCP | \ + RTE_ETH_RSS_L3_PRE48) + +#define ETH_RSS_IPV6_PRE56_TCP ( \ + ETH_RSS_NONFRAG_IPV6_TCP | \ + RTE_ETH_RSS_L3_PRE56) + +#define ETH_RSS_IPV6_PRE64_TCP ( \ + ETH_RSS_NONFRAG_IPV6_TCP | \ + RTE_ETH_RSS_L3_PRE64) + +#define ETH_RSS_IPV6_PRE96_TCP ( \ + ETH_RSS_NONFRAG_IPV6_TCP | \ + RTE_ETH_RSS_L3_PRE96) + +#define ETH_RSS_IPV6_PRE32_SCTP ( \ + ETH_RSS_NONFRAG_IPV6_SCTP | \ + RTE_ETH_RSS_L3_PRE32) + +#define ETH_RSS_IPV6_PRE40_SCTP ( \ + ETH_RSS_NONFRAG_IPV6_SCTP | \ + RTE_ETH_RSS_L3_PRE40) + +#define ETH_RSS_IPV6_PRE48_SCTP ( \ + ETH_RSS_NONFRAG_IPV6_SCTP | \ + RTE_ETH_RSS_L3_PRE48) + +#define ETH_RSS_IPV6_PRE56_SCTP ( \ + ETH_RSS_NONFRAG_IPV6_SCTP | \ + RTE_ETH_RSS_L3_PRE56) + +#define ETH_RSS_IPV6_PRE64_SCTP ( \ + ETH_RSS_NONFRAG_IPV6_SCTP | \ + RTE_ETH_RSS_L3_PRE64) + +#define ETH_RSS_IPV6_PRE96_SCTP ( \ + ETH_RSS_NONFRAG_IPV6_SCTP | \ + RTE_ETH_RSS_L3_PRE96) + #define ETH_RSS_IP ( \ ETH_RSS_IPV4 | \ ETH_RSS_FRAG_IPV4 | \ @@ -576,6 +731,10 @@ rte_eth_rss_hf_refine(uint64_t rss_hf) ETH_RSS_GENEVE | \ ETH_RSS_NVGRE) +#define ETH_RSS_VLAN ( \ + ETH_RSS_S_VLAN | \ + ETH_RSS_C_VLAN) + /**< Mask of valid RSS hash protocols */ #define ETH_RSS_PROTO_MASK ( \ ETH_RSS_IPV4 | \ @@ -798,7 +957,6 @@ struct rte_eth_txmode { */ uint64_t offloads; - /* For i40e 
specifically */ uint16_t pvid; __extension__ uint8_t hw_vlan_reject_tagged : 1, @@ -812,6 +970,61 @@ struct rte_eth_txmode { void *reserved_ptrs[2]; /**< Reserved for future fields */ }; +/** + * @warning + * @b EXPERIMENTAL: this structure may change without prior notice. + * + * A structure used to configure an Rx packet segment to split. + * + * If RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT flag is set in offloads field, + * the PMD will split the received packets into multiple segments + * according to the specification in the description array: + * + * - The first network buffer will be allocated from the memory pool, + * specified in the first array element, the second buffer, from the + * pool in the second element, and so on. + * + * - The offsets from the segment description elements specify + * the data offset from the buffer beginning except the first mbuf. + * The first segment offset is added with RTE_PKTMBUF_HEADROOM. + * + * - The lengths in the elements define the maximal data amount + * being received to each segment. The receiving starts with filling + * up the first mbuf data buffer up to specified length. If the + * there are data remaining (packet is longer than buffer in the first + * mbuf) the following data will be pushed to the next segment + * up to its own length, and so on. + * + * - If the length in the segment description element is zero + * the actual buffer size will be deduced from the appropriate + * memory pool properties. + * + * - If there is not enough elements to describe the buffer for entire + * packet of maximal length the following parameters will be used + * for the all remaining segments: + * - pool from the last valid element + * - the buffer size from this pool + * - zero offset + */ +struct rte_eth_rxseg_split { + struct rte_mempool *mp; /**< Memory pool to allocate segment from. */ + uint16_t length; /**< Segment data length, configures split point. */ + uint16_t offset; /**< Data offset from beginning of mbuf data buffer. */ + uint32_t reserved; /**< Reserved field. */ +}; + +/** + * @warning + * @b EXPERIMENTAL: this structure may change without prior notice. + * + * A common structure used to describe Rx packet segment properties. + */ +union rte_eth_rxseg { + /* The settings for buffer split offload. */ + struct rte_eth_rxseg_split split; + /* The other features settings should be added here. */ +}; + /** * A structure used to configure an RX ring of an Ethernet port. */ @@ -820,12 +1033,21 @@ struct rte_eth_rxconf { uint16_t rx_free_thresh; /**< Drives the freeing of RX descriptors. */ uint8_t rx_drop_en; /**< Drop packets if no descriptors are available. */ uint8_t rx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */ + uint16_t rx_nseg; /**< Number of descriptions in rx_seg array. */ /** * Per-queue Rx offloads to be set using DEV_RX_OFFLOAD_* flags. * Only offloads set on rx_queue_offload_capa or rx_offload_capa * fields on rte_eth_dev_info structure are allowed to be set. */ uint64_t offloads; + /** + * Points to the array of segment descriptions for an entire packet. + * Array elements are properties for consecutive Rx segments. + * + * The supported capabilities of receiving segmentation is reported + * in rte_eth_dev_info.rx_seg_capa field. + */ + union rte_eth_rxseg *rx_seg; uint64_t reserved_64s[2]; /**< Reserved for future fields */ void *reserved_ptrs[2]; /**< Reserved for future fields */ @@ -888,7 +1110,32 @@ struct rte_eth_hairpin_peer { * A structure used to configure hairpin binding. 
*/ struct rte_eth_hairpin_conf { - uint16_t peer_count; /**< The number of peers. */ + uint32_t peer_count:16; /**< The number of peers. */ + + /** + * Explicit Tx flow rule mode. + * One hairpin pair of queues should have the same attribute. + * + * - When set, the user should be responsible for inserting the hairpin + * Tx part flows and removing them. + * - When clear, the PMD will try to handle the Tx part of the flows, + * e.g., by splitting one flow into two parts. + */ + uint32_t tx_explicit:1; + + /** + * Manually bind hairpin queues. + * One hairpin pair of queues should have the same attribute. + * + * - When set, to enable hairpin, the user should call the hairpin bind + * function after all the queues are set up properly and the ports are + * started. Also, the hairpin unbind function should be called + * accordingly before stopping a port that with hairpin configured. + * - When clear, the PMD will try to enable the hairpin with the queues + * configured automatically during port start. + */ + uint32_t manual_bind:1; + uint32_t reserved:14; /**< Reserved bits. */ struct rte_eth_hairpin_peer peers[RTE_ETH_MAX_HAIRPIN_PEERS]; }; @@ -1103,6 +1350,7 @@ struct rte_eth_conf { #define DEV_RX_OFFLOAD_SCTP_CKSUM 0x00020000 #define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM 0x00040000 #define DEV_RX_OFFLOAD_RSS_HASH 0x00080000 +#define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT 0x00100000 #define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \ DEV_RX_OFFLOAD_UDP_CKSUM | \ @@ -1161,6 +1409,10 @@ struct rte_eth_conf { /** Device supports outer UDP checksum */ #define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM 0x00100000 +/** Device supports send on timestamp */ +#define DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP 0x00200000 + + #define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP 0x00000001 /**< Device supports Rx queue setup after device started*/ #define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP 0x00000002 @@ -1214,6 +1466,21 @@ struct rte_eth_switch_info { */ }; +/** + * @warning + * @b EXPERIMENTAL: this structure may change without prior notice. + * + * Ethernet device Rx buffer segmentation capabilities. + */ +struct rte_eth_rxseg_capa { + __extension__ + uint32_t multi_pools:1; /**< Supports receiving to multiple pools.*/ + uint32_t offset_allowed:1; /**< Supports buffer offsets. */ + uint32_t offset_align_log2:4; /**< Required offset alignment. */ + uint16_t max_nseg; /**< Maximum amount of segments to split. */ + uint16_t reserved; /**< Reserved field. */ +}; + /** * Ethernet device information */ @@ -1242,6 +1509,7 @@ struct rte_eth_dev_info { /** Maximum number of hash MAC addresses for MTA and UTA. */ uint16_t max_vfs; /**< Maximum number of VFs. */ uint16_t max_vmdq_pools; /**< Maximum number of VMDq pools. */ + struct rte_eth_rxseg_capa rx_seg_capa; /**< Segmentation capability.*/ uint64_t rx_offload_capa; /**< All RX offload capabilities including all per-queue ones */ uint64_t tx_offload_capa; @@ -1291,6 +1559,7 @@ struct rte_eth_rxq_info { struct rte_eth_rxconf conf; /**< queue config parameters. */ uint8_t scattered_rx; /**< scattered packets RX supported. */ uint16_t nb_desc; /**< configured number of RXDs. */ + uint16_t rx_buf_size; /**< hardware receive buffer size. */ } __rte_cache_min_aligned; /** @@ -1345,6 +1614,13 @@ struct rte_eth_xstat { * An array of this structure is returned by rte_eth_xstats_get_names(). * It lists the names of extended statistics for a PMD. The *rte_eth_xstat* * structure references these names by their array index. + * + * The xstats should follow a common naming scheme. 
+ * Some names are standardized in rte_stats_strings. + * Examples: + * - rx_missed_errors + * - tx_q3_bytes + * - tx_size_128_to_255_packets */ struct rte_eth_xstat_name { char name[RTE_ETH_XSTATS_NAME_SIZE]; /**< The statistic name. */ @@ -1360,13 +1636,13 @@ struct rte_eth_xstat_name { struct rte_eth_dcb_tc_queue_mapping { /** rx queues assigned to tc per Pool */ struct { - uint8_t base; - uint8_t nb_queue; + uint16_t base; + uint16_t nb_queue; } tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS]; /** rx queues assigned to tc per Pool */ struct { - uint8_t base; - uint8_t nb_queue; + uint16_t base; + uint16_t nb_queue; } tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS]; }; @@ -1382,6 +1658,29 @@ struct rte_eth_dcb_info { struct rte_eth_dcb_tc_queue_mapping tc_queue; }; +/** + * This enum indicates the possible Forward Error Correction (FEC) modes + * of an ethdev port. + */ +enum rte_eth_fec_mode { + RTE_ETH_FEC_NOFEC = 0, /**< FEC is off */ + RTE_ETH_FEC_AUTO, /**< FEC autonegotiation modes */ + RTE_ETH_FEC_BASER, /**< FEC using common algorithm */ + RTE_ETH_FEC_RS, /**< FEC using RS algorithm */ +}; + +/* Translate from FEC mode to FEC capa */ +#define RTE_ETH_FEC_MODE_TO_CAPA(x) (1U << (x)) + +/* This macro indicates FEC capa mask */ +#define RTE_ETH_FEC_MODE_CAPA_MASK(x) (1U << (RTE_ETH_FEC_ ## x)) + +/* A structure used to get capabilities per link speed */ +struct rte_eth_fec_capa { + uint32_t speed; /**< Link speed (see ETH_SPEED_NUM_*) */ + uint32_t capa; /**< FEC capabilities bitmask */ +}; + #define RTE_ETH_ALL RTE_MAX_ETHPORTS /* Macros to check for valid port */ @@ -1492,11 +1791,8 @@ struct rte_eth_dev_owner { char name[RTE_ETH_MAX_OWNER_NAME_LEN]; /**< The owner name. */ }; -/** - * Port is released (i.e. totally freed and data erased) on close. - * Temporary flag for PMD migration to new rte_eth_dev_close() behaviour. - */ -#define RTE_ETH_DEV_CLOSE_REMOVE 0x0001 +/** PMD supports thread-safe flow operations */ +#define RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE 0x0001 /** Device supports link state interrupt */ #define RTE_ETH_DEV_INTR_LSC 0x0002 /** Device is a bonded slave */ @@ -1507,6 +1803,11 @@ struct rte_eth_dev_owner { #define RTE_ETH_DEV_REPRESENTOR 0x0010 /** Device does not support MAC change after started */ #define RTE_ETH_DEV_NOLIVE_MAC_ADDR 0x0020 +/** + * Queue xstats filled automatically by ethdev layer. + * PMDs filling the queue xstats themselves should not set this flag + */ +#define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS 0x0040 /** * Iterates over valid ethdev ports owned by a specific owner. @@ -1594,8 +1895,7 @@ rte_eth_find_next_of(uint16_t port_id_start, */ __rte_experimental uint16_t -rte_eth_find_next_sibling(uint16_t port_id_start, - uint16_t ref_port_id); +rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id); /** * Macro to iterate over all ethdev ports sharing the same rte_device @@ -1841,12 +2141,25 @@ rte_eth_dev_is_removed(uint16_t port_id); * No need to repeat any bit in rx_conf->offloads which has already been * enabled in rte_eth_dev_configure() at port level. An offloading enabled * at port level can't be disabled at queue level. + * The configuration structure also contains the pointer to the array + * of the receiving buffer segment descriptions, see rx_seg and rx_nseg + * fields, this extended configuration might be used by split offloads like + * RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT. If mp_pool is not NULL, + * the extended configuration fields must be set to NULL and zero. 
* @param mb_pool * The pointer to the memory pool from which to allocate *rte_mbuf* network - * memory buffers to populate each descriptor of the receive ring. + * memory buffers to populate each descriptor of the receive ring. There are + * two options to provide Rx buffer configuration: + * - single pool: + * mb_pool is not NULL, rx_conf.rx_nseg is 0. + * - multiple segments description: + * mb_pool is NULL, rx_conf.rx_seg is not NULL, rx_conf.rx_nseg is not 0. + * Taken only if flag RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT is set in offloads. + * * @return * - 0: Success, receive queue correctly set up. * - -EIO: if device is removed. + * - -ENODEV: if *port_id* is invalid. * - -EINVAL: The memory pool pointer is null or the size of network buffers * which can be allocated from this memory pool does not fit the various * buffer sizes allowed by the device controller. @@ -1881,6 +2194,7 @@ int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, * * @return * - (0) if successful. + * - (-ENODEV) if *port_id* is invalid. * - (-ENOTSUP) if hardware doesn't support. * - (-EINVAL) if bad parameter. * - (-ENOMEM) if unable to allocate the resources. @@ -1962,6 +2276,7 @@ int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, * * @return * - (0) if successful. + * - (-ENODEV) if *port_id* is invalid. * - (-ENOTSUP) if hardware doesn't support. * - (-EINVAL) if bad parameter. * - (-ENOMEM) if unable to allocate the resources. @@ -1971,6 +2286,88 @@ int rte_eth_tx_hairpin_queue_setup (uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, const struct rte_eth_hairpin_conf *conf); +/** + * @warning + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + * Get all the hairpin peer Rx / Tx ports of the current port. + * The caller should ensure that the array is large enough to save the ports + * list. + * + * @param port_id + * The port identifier of the Ethernet device. + * @param peer_ports + * Pointer to the array to store the peer ports list. + * @param len + * Length of the array to store the port identifiers. + * @param direction + * Current port to peer port direction + * positive - current used as Tx to get all peer Rx ports. + * zero - current used as Rx to get all peer Tx ports. + * + * @return + * - (0 or positive) actual peer ports number. + * - (-EINVAL) if bad parameter. + * - (-ENODEV) if *port_id* invalid + * - (-ENOTSUP) if hardware doesn't support. + * - Others detailed errors from PMD drivers. + */ +__rte_experimental +int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, + size_t len, uint32_t direction); + +/** + * @warning + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + * Bind all hairpin Tx queues of one port to the Rx queues of the peer port. + * It is only allowed to call this function after all hairpin queues are + * configured properly and the devices are in started state. + * + * @param tx_port + * The identifier of the Tx port. + * @param rx_port + * The identifier of peer Rx port. + * RTE_MAX_ETHPORTS is allowed for the traversal of all devices. + * Rx port ID could have the same value as Tx port ID. + * + * @return + * - (0) if successful. + * - (-ENODEV) if Tx port ID is invalid. + * - (-EBUSY) if device is not in started state. + * - (-ENOTSUP) if hardware doesn't support. + * - Others detailed errors from PMD drivers. 
+ */ +__rte_experimental +int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port); + +/** + * @warning + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + * Unbind all hairpin Tx queues of one port from the Rx queues of the peer port. + * This should be called before closing the Tx or Rx devices, if the bind + * function is called before. + * After unbinding the hairpin ports pair, it is allowed to bind them again. + * Changing queues configuration should be after stopping the device(s). + * + * @param tx_port + * The identifier of the Tx port. + * @param rx_port + * The identifier of peer Rx port. + * RTE_MAX_ETHPORTS is allowed for traversal of all devices. + * Rx port ID could have the same value as Tx port ID. + * + * @return + * - (0) if successful. + * - (-ENODEV) if Tx port ID is invalid. + * - (-EBUSY) if device is in stopped state. + * - (-ENOTSUP) if hardware doesn't support. + * - Others detailed errors from PMD drivers. + */ +__rte_experimental +int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port); + /** * Return the NUMA socket to which an Ethernet device is connected * @@ -2006,7 +2403,8 @@ int rte_eth_dev_is_valid_port(uint16_t port_id); * to rte_eth_dev_configure(). * @return * - 0: Success, the receive queue is started. - * - -EINVAL: The port_id or the queue_id out of range or belong to hairpin. + * - -ENODEV: if *port_id* is invalid. + * - -EINVAL: The queue_id out of range or belong to hairpin. * - -EIO: if device is removed. * - -ENOTSUP: The function not supported in PMD driver. */ @@ -2023,7 +2421,8 @@ int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id); * to rte_eth_dev_configure(). * @return * - 0: Success, the receive queue is stopped. - * - -EINVAL: The port_id or the queue_id out of range or belong to hairpin. + * - -ENODEV: if *port_id* is invalid. + * - -EINVAL: The queue_id out of range or belong to hairpin. * - -EIO: if device is removed. * - -ENOTSUP: The function not supported in PMD driver. */ @@ -2041,7 +2440,8 @@ int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id); * to rte_eth_dev_configure(). * @return * - 0: Success, the transmit queue is started. - * - -EINVAL: The port_id or the queue_id out of range or belong to hairpin. + * - -ENODEV: if *port_id* is invalid. + * - -EINVAL: The queue_id out of range or belong to hairpin. * - -EIO: if device is removed. * - -ENOTSUP: The function not supported in PMD driver. */ @@ -2058,7 +2458,8 @@ int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id); * to rte_eth_dev_configure(). * @return * - 0: Success, the transmit queue is stopped. - * - -EINVAL: The port_id or the queue_id out of range or belong to hairpin. + * - -ENODEV: if *port_id* is invalid. + * - -EINVAL: The queue_id out of range or belong to hairpin. * - -EIO: if device is removed. * - -ENOTSUP: The function not supported in PMD driver. */ @@ -2091,8 +2492,11 @@ int rte_eth_dev_start(uint16_t port_id); * * @param port_id * The port identifier of the Ethernet device. + * @return + * - 0: Success, Ethernet device stopped. + * - <0: Error code of the driver device stop function. */ -void rte_eth_dev_stop(uint16_t port_id); +int rte_eth_dev_stop(uint16_t port_id); /** * Link up an Ethernet device. @@ -2121,13 +2525,15 @@ int rte_eth_dev_set_link_down(uint16_t port_id); /** * Close a stopped Ethernet device. The device cannot be restarted! - * The function frees all port resources if the driver supports - * the flag RTE_ETH_DEV_CLOSE_REMOVE. 
+ * The function frees all port resources. * * @param port_id * The port identifier of the Ethernet device. + * @return + * - Zero if the port is closed successfully. + * - Negative if something went wrong. */ -void rte_eth_dev_close(uint16_t port_id); +int rte_eth_dev_close(uint16_t port_id); /** * Reset a Ethernet device and keep its port id. @@ -2159,7 +2565,7 @@ void rte_eth_dev_close(uint16_t port_id); * * @return * - (0) if successful. - * - (-EINVAL) if port identifier is invalid. + * - (-ENODEV) if *port_id* is invalid. * - (-ENOTSUP) if hardware doesn't support this function. * - (-EPERM) if not ran from the primary process. * - (-EIO) if re-initialisation failed or device is removed. @@ -2245,15 +2651,16 @@ int rte_eth_allmulticast_disable(uint16_t port_id); int rte_eth_allmulticast_get(uint16_t port_id); /** - * Retrieve the status (ON/OFF), the speed (in Mbps) and the mode (HALF-DUPLEX - * or FULL-DUPLEX) of the physical link of an Ethernet device. It might need - * to wait up to 9 seconds in it. + * Retrieve the link status (up/down), the duplex mode (half/full), + * the negotiation (auto/fixed), and if available, the speed (Mbps). + * + * It might need to wait up to 9 seconds. + * @see rte_eth_link_get_nowait. * * @param port_id * The port identifier of the Ethernet device. * @param link - * A pointer to an *rte_eth_link* structure to be filled with - * the status, the speed and the mode of the Ethernet device link. + * Link information written back. * @return * - (0) if successful. * - (-ENOTSUP) if the function is not supported in PMD driver. @@ -2262,15 +2669,13 @@ int rte_eth_allmulticast_get(uint16_t port_id); int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link); /** - * Retrieve the status (ON/OFF), the speed (in Mbps) and the mode (HALF-DUPLEX - * or FULL-DUPLEX) of the physical link of an Ethernet device. It is a no-wait - * version of rte_eth_link_get(). + * Retrieve the link status (up/down), the duplex mode (half/full), + * the negotiation (auto/fixed), and if available, the speed (Mbps). * * @param port_id * The port identifier of the Ethernet device. * @param link - * A pointer to an *rte_eth_link* structure to be filled with - * the status, the speed and the mode of the Ethernet device link. + * Link information written back. * @return * - (0) if successful. * - (-ENOTSUP) if the function is not supported in PMD driver. @@ -2278,6 +2683,44 @@ int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link); */ int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link); +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * The function converts a link_speed to a string. It handles all special + * values like unknown or none speed. + * + * @param link_speed + * link_speed of rte_eth_link struct + * @return + * Link speed in textual format. It's pointer to immutable memory. + * No free is required. + */ +__rte_experimental +const char *rte_eth_link_speed_to_str(uint32_t link_speed); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * The function converts a rte_eth_link struct representing a link status to + * a string. + * + * @param str + * A pointer to a string to be filled with textual representation of + * device status. At least ETH_LINK_MAX_STR_LEN bytes should be allocated to + * store default link status text. + * @param len + * Length of available memory at 'str' string. 
+ * @param eth_link + * Link status returned by rte_eth_link_get function + * @return + * Number of bytes written to str array. + */ +__rte_experimental +int rte_eth_link_to_str(char *str, size_t len, + const struct rte_eth_link *eth_link); + /** * Retrieve the general I/O statistics of an Ethernet device. * @@ -2477,6 +2920,7 @@ int rte_eth_xstats_reset(uint16_t port_id); * The per-queue packet statistics functionality number that the transmit * queue is to be assigned. * The value must be in the range [0, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1]. + * Max RTE_ETHDEV_QUEUE_STAT_CNTRS being 256. * @return * Zero if successful. Non-zero otherwise. */ @@ -2497,6 +2941,7 @@ int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, * The per-queue packet statistics functionality number that the receive * queue is to be assigned. * The value must be in the range [0, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1]. + * Max RTE_ETHDEV_QUEUE_STAT_CNTRS being 256. * @return * Zero if successful. Non-zero otherwise. */ @@ -2716,7 +3161,6 @@ int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on); /** * Enable/Disable hardware VLAN Strip by a rx queue of an Ethernet device. - * 82599/X540/X550 can support VLAN stripping at the rx queue level * * @param port_id * The port identifier of the Ethernet device. @@ -2738,8 +3182,7 @@ int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, /** * Set the Outer VLAN Ether Type by an Ethernet device, it can be inserted to - * the VLAN Header. This is a register setup available on some Intel NIC, not - * but all, please check the data sheet for availability. + * the VLAN header. * * @param port_id * The port identifier of the Ethernet device. @@ -2758,12 +3201,7 @@ int rte_eth_dev_set_vlan_ether_type(uint16_t port_id, uint16_t tag_type); /** - * Set VLAN offload configuration on an Ethernet device - * Enable/Disable Extended VLAN by an Ethernet device, This is a register setup - * available on some Intel NIC, not but all, please check the data sheet for - * availability. - * Enable/Disable VLAN Strip can be done on rx queue for certain NIC, but here - * the configuration is applied on the port level. + * Set VLAN offload configuration on an Ethernet device. * * @param port_id * The port identifier of the Ethernet device. @@ -3015,6 +3453,7 @@ enum rte_eth_event_type { RTE_ETH_EVENT_NEW, /**< port is probed */ RTE_ETH_EVENT_DESTROY, /**< port is released */ RTE_ETH_EVENT_IPSEC, /**< IPsec offload related event */ + RTE_ETH_EVENT_FLOW_AGED,/**< New aged-out flows is detected */ RTE_ETH_EVENT_MAX /**< max value of this enum */ }; @@ -3204,6 +3643,88 @@ int rte_eth_led_on(uint16_t port_id); */ int rte_eth_led_off(uint16_t port_id); +/** + * @warning + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + * Get Forward Error Correction(FEC) capability. + * + * @param port_id + * The port identifier of the Ethernet device. + * @param speed_fec_capa + * speed_fec_capa is out only with per-speed capabilities. + * If set to NULL, the function returns the required number + * of required array entries. + * @param num + * a number of elements in an speed_fec_capa array. + * + * @return + * - A non-negative value lower or equal to num: success. The return value + * is the number of entries filled in the fec capa array. + * - A non-negative value higher than num: error, the given fec capa array + * is too small. The return value corresponds to the num that should + * be given to succeed. 
The entries in fec capa array are not valid and + * shall not be used by the caller. + * - (-ENOTSUP) if underlying hardware OR driver doesn't support. + * that operation. + * - (-EIO) if device is removed. + * - (-ENODEV) if *port_id* invalid. + * - (-EINVAL) if *num* or *speed_fec_capa* invalid + */ +__rte_experimental +int rte_eth_fec_get_capability(uint16_t port_id, + struct rte_eth_fec_capa *speed_fec_capa, + unsigned int num); + +/** + * @warning + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + * Get current Forward Error Correction(FEC) mode. + * If link is down and AUTO is enabled, AUTO is returned, otherwise, + * configured FEC mode is returned. + * If link is up, current FEC mode is returned. + * + * @param port_id + * The port identifier of the Ethernet device. + * @param fec_capa + * A bitmask of enabled FEC modes. If AUTO bit is set, other + * bits specify FEC modes which may be negotiated. If AUTO + * bit is clear, specify FEC modes to be used (only one valid + * mode per speed may be set). + * @return + * - (0) if successful. + * - (-ENOTSUP) if underlying hardware OR driver doesn't support. + * that operation. + * - (-EIO) if device is removed. + * - (-ENODEV) if *port_id* invalid. + */ +__rte_experimental +int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa); + +/** + * @warning + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + * Set Forward Error Correction(FEC) mode. + * + * @param port_id + * The port identifier of the Ethernet device. + * @param fec_capa + * A bitmask of allowed FEC modes. If AUTO bit is set, other + * bits specify FEC modes which may be negotiated. If AUTO + * bit is clear, specify FEC modes to be used (only one valid + * mode per speed may be set). + * @return + * - (0) if successful. + * - (-EINVAL) if the FEC mode is not valid. + * - (-ENOTSUP) if underlying hardware OR driver doesn't support. + * - (-EIO) if device is removed. + * - (-ENODEV) if *port_id* invalid. + */ +__rte_experimental +int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa); + /** * Get current status of the Ethernet link flow control for Ethernet device * @@ -3256,8 +3777,7 @@ int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, struct rte_eth_pfc_conf *pfc_conf); /** - * Add a MAC address to an internal array of addresses used to enable whitelist - * filtering to accept packets only if the destination MAC address matches. + * Add a MAC address to the set used for filtering incoming packets. * * @param port_id * The port identifier of the Ethernet device. @@ -3321,6 +3841,7 @@ int rte_eth_dev_default_mac_addr_set(uint16_t port_id, * rte_eth_dev_info_get(). * @return * - (0) if successful. + * - (-ENODEV) if *port_id* is invalid. * - (-ENOTSUP) if hardware doesn't support. * - (-EINVAL) if bad parameter. * - (-EIO) if device is removed. @@ -3342,6 +3863,7 @@ int rte_eth_dev_rss_reta_update(uint16_t port_id, * rte_eth_dev_info_get(). * @return * - (0) if successful. + * - (-ENODEV) if *port_id* is invalid. * - (-ENOTSUP) if hardware doesn't support. * - (-EINVAL) if bad parameter. * - (-EIO) if device is removed. @@ -3612,7 +4134,8 @@ struct rte_eth_rxtx_callback; * The callback function * @param user_param * A generic pointer parameter which will be passed to each invocation of the - * callback function on this port and queue. + * callback function on this port and queue. Inter-thread synchronization + * of any user data changes is the responsibility of the user. * * @return * NULL on error. 
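
As a hedged illustration of the two-step capability query described above for rte_eth_fec_get_capability() (a first call with a NULL array to learn the required size, a second call to fill it), the sketch below enables RS FEC when the device reports it for any speed. The helper name, the choice of the RS mode and the error handling are illustrative assumptions, not part of this patch.

#include <errno.h>
#include <stdlib.h>

#include <rte_ethdev.h>

/* Hypothetical helper, not part of the patch: prefer RS FEC if supported. */
static int
enable_rs_fec(uint16_t port_id)
{
	struct rte_eth_fec_capa *speed_fec_capa;
	int ret, num, i;

	/* First call: NULL array, the return value is the required size. */
	num = rte_eth_fec_get_capability(port_id, NULL, 0);
	if (num <= 0)
		return num < 0 ? num : -ENOTSUP;

	speed_fec_capa = calloc(num, sizeof(*speed_fec_capa));
	if (speed_fec_capa == NULL)
		return -ENOMEM;

	/* Second call: fill the per-speed capability entries. */
	ret = rte_eth_fec_get_capability(port_id, speed_fec_capa, num);
	if (ret < 0)
		goto out;

	ret = -ENOTSUP;
	for (i = 0; i < num; i++) {
		if (speed_fec_capa[i].capa &
		    RTE_ETH_FEC_MODE_CAPA_MASK(RS)) {
			/* Request RS as the only allowed FEC mode. */
			ret = rte_eth_fec_set(port_id,
					RTE_ETH_FEC_MODE_CAPA_MASK(RS));
			break;
		}
	}
out:
	free(speed_fec_capa);
	return ret;
}
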
@@ -3641,7 +4164,8 @@ rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, * The callback function * @param user_param * A generic pointer parameter which will be passed to each invocation of the - * callback function on this port and queue. + * callback function on this port and queue. Inter-thread synchronization + * of any user data changes is the responsibility of the user. * * @return * NULL on error. @@ -3669,7 +4193,8 @@ rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, * The callback function * @param user_param * A generic pointer parameter which will be passed to each invocation of the - * callback function on this port and queue. + * callback function on this port and queue. Inter-thread synchronization + * of any user data changes is the responsibility of the user. * * @return * NULL on error. @@ -3694,7 +4219,9 @@ rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, * on that queue. * * - After a short delay - where the delay is sufficient to allow any - * in-flight callbacks to complete. + * in-flight callbacks to complete. Alternately, the RCU mechanism can be + * used to detect when data plane threads have ceased referencing the + * callback memory. * * @param port_id * The port identifier of the Ethernet device. @@ -3705,8 +4232,9 @@ rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, * * @return * - 0: Success. Callback was removed. + * - -ENODEV: If *port_id* is invalid. * - -ENOTSUP: Callback support is not available. - * - -EINVAL: The port_id or the queue_id is out of range, or the callback + * - -EINVAL: The queue_id is out of range, or the callback * is NULL or not found for the port/queue. */ int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, @@ -3727,7 +4255,9 @@ int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, * on that queue. * * - After a short delay - where the delay is sufficient to allow any - * in-flight callbacks to complete. + * in-flight callbacks to complete. Alternately, the RCU mechanism can be + * used to detect when data plane threads have ceased referencing the + * callback memory. * * @param port_id * The port identifier of the Ethernet device. @@ -3738,8 +4268,9 @@ int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, * * @return * - 0: Success. Callback was removed. + * - -ENODEV: If *port_id* is invalid. * - -ENOTSUP: Callback support is not available. - * - -EINVAL: The port_id or the queue_id is out of range, or the callback + * - -EINVAL: The queue_id is out of range, or the callback * is NULL or not found for the port/queue. */ int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, @@ -3759,8 +4290,9 @@ int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, * * @return * - 0: Success + * - -ENODEV: If *port_id* is invalid. * - -ENOTSUP: routine is not supported by the device PMD. - * - -EINVAL: The port_id or the queue_id is out of range, or the queue + * - -EINVAL: The queue_id is out of range, or the queue * is hairpin queue. */ int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, @@ -3780,8 +4312,9 @@ int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, * * @return * - 0: Success + * - -ENODEV: If *port_id* is invalid. * - -ENOTSUP: routine is not supported by the device PMD. - * - -EINVAL: The port_id or the queue_id is out of range, or the queue + * - -EINVAL: The queue_id is out of range, or the queue * is hairpin queue. 
*/ int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, @@ -3801,8 +4334,9 @@ int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, * * @return * - 0: Success + * - -ENODEV: If *port_id* is invalid. * - -ENOTSUP: routine is not supported by the device PMD. - * - -EINVAL: The port_id or the queue_id is out of range. + * - -EINVAL: The queue_id is out of range. */ __rte_experimental int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, @@ -3822,8 +4356,9 @@ int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, * * @return * - 0: Success + * - -ENODEV: If *port_id* is invalid. * - -ENOTSUP: routine is not supported by the device PMD. - * - -EINVAL: The port_id or the queue_id is out of range. + * - -EINVAL: The queue_id is out of range. */ __rte_experimental int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, @@ -4212,6 +4747,7 @@ rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id); * Buffer of size RTE_ETH_NAME_MAX_LEN to store the name. * @return * - (0) if successful. +* - (-ENODEV) if *port_id* is invalid. * - (-EINVAL) on failure. */ int @@ -4388,10 +4924,18 @@ rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, rx_pkts, nb_pkts); #ifdef RTE_ETHDEV_RXTX_CALLBACKS - if (unlikely(dev->post_rx_burst_cbs[queue_id] != NULL)) { - struct rte_eth_rxtx_callback *cb = - dev->post_rx_burst_cbs[queue_id]; + struct rte_eth_rxtx_callback *cb; + /* __ATOMIC_RELEASE memory order was used when the + * call back was inserted into the list. + * Since there is a clear dependency between loading + * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is + * not required. + */ + cb = __atomic_load_n(&dev->post_rx_burst_cbs[queue_id], + __ATOMIC_RELAXED); + + if (unlikely(cb != NULL)) { do { nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx, nb_pkts, cb->param); @@ -4400,6 +4944,7 @@ rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, } #endif + rte_ethdev_trace_rx_burst(port_id, queue_id, (void **)rx_pkts, nb_rx); return nb_rx; } @@ -4412,7 +4957,8 @@ rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, * The queue id on the specific port. * @return * The number of used descriptors in the specific queue, or: - * (-EINVAL) if *port_id* or *queue_id* is invalid + * - (-ENODEV) if *port_id* is invalid. + * (-EINVAL) if *queue_id* is invalid * (-ENOTSUP) if the device does not support this function */ static inline int @@ -4420,13 +4966,14 @@ rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id) { struct rte_eth_dev *dev; - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP); - if (queue_id >= dev->data->nb_rx_queues) + RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_queue_count, -ENOTSUP); + if (queue_id >= dev->data->nb_rx_queues || + dev->data->rx_queues[queue_id] == NULL) return -EINVAL; - return (int)(*dev->dev_ops->rx_queue_count)(dev, queue_id); + return (int)(*dev->rx_queue_count)(dev, queue_id); } /** @@ -4444,14 +4991,14 @@ rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id) * - (-ENODEV) if *port_id* invalid. 
* - (-ENOTSUP) if the device does not support this function */ +__rte_deprecated static inline int rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset) { struct rte_eth_dev *dev = &rte_eth_devices[port_id]; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP); - return (*dev->dev_ops->rx_descriptor_done)( \ - dev->data->rx_queues[queue_id], offset); + RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_descriptor_done, -ENOTSUP); + return (*dev->rx_descriptor_done)(dev->data->rx_queues[queue_id], offset); } #define RTE_ETH_RX_DESC_AVAIL 0 /**< Desc available for hw. */ @@ -4506,10 +5053,10 @@ rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id, if (queue_id >= dev->data->nb_rx_queues) return -ENODEV; #endif - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_status, -ENOTSUP); + RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_descriptor_status, -ENOTSUP); rxq = dev->data->rx_queues[queue_id]; - return (*dev->dev_ops->rx_descriptor_status)(rxq, offset); + return (*dev->rx_descriptor_status)(rxq, offset); } #define RTE_ETH_TX_DESC_FULL 0 /**< Desc filled for hw, waiting xmit. */ @@ -4563,10 +5110,10 @@ static inline int rte_eth_tx_descriptor_status(uint16_t port_id, if (queue_id >= dev->data->nb_tx_queues) return -ENODEV; #endif - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_descriptor_status, -ENOTSUP); + RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_descriptor_status, -ENOTSUP); txq = dev->data->tx_queues[queue_id]; - return (*dev->dev_ops->tx_descriptor_status)(txq, offset); + return (*dev->tx_descriptor_status)(txq, offset); } /** @@ -4652,7 +5199,16 @@ rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, #endif #ifdef RTE_ETHDEV_RXTX_CALLBACKS - struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id]; + struct rte_eth_rxtx_callback *cb; + + /* __ATOMIC_RELEASE memory order was used when the + * call back was inserted into the list. + * Since there is a clear dependency between loading + * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is + * not required. + */ + cb = __atomic_load_n(&dev->pre_tx_burst_cbs[queue_id], + __ATOMIC_RELAXED); if (unlikely(cb != NULL)) { do { @@ -4663,6 +5219,8 @@ rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, } #endif + rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, + nb_pkts); return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts); } @@ -4716,6 +5274,7 @@ rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, * meet devices requirements with rte_errno set appropriately: * - EINVAL: offload flags are not correctly set * - ENOTSUP: the offload feature is not supported by the hardware + * - ENODEV: if *port_id* is invalid (with debug enabled only) * */ @@ -4730,7 +5289,7 @@ rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id, #ifdef RTE_LIBRTE_ETHDEV_DEBUG if (!rte_eth_dev_is_valid_port(port_id)) { RTE_ETHDEV_LOG(ERR, "Invalid TX port_id=%u\n", port_id); - rte_errno = EINVAL; + rte_errno = ENODEV; return 0; } #endif
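
To round off, a hedged sketch of how an application might use the Rx buffer split offload introduced by this patch (RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT together with the rx_seg/rx_nseg fields of struct rte_eth_rxconf): packet headers land in mbufs taken from one pool, the remaining payload in mbufs from another. The helper name, the pool arguments, the 128-byte split point and the capability checks are assumptions for illustration only; whether the offload must additionally be requested at rte_eth_dev_configure() time depends on the PMD.

#include <errno.h>
#include <string.h>

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Hypothetical helper, not part of the patch: set up a 2-segment split Rx queue. */
static int
setup_split_rxq(uint16_t port_id, uint16_t queue_id, uint16_t nb_rx_desc,
		struct rte_mempool *hdr_pool, struct rte_mempool *pay_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxconf;
	union rte_eth_rxseg rx_seg[2];
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;
	/* Give up if the PMD cannot split a packet into two segments. */
	if (dev_info.rx_seg_capa.max_nseg < 2 ||
	    (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0)
		return -ENOTSUP;

	memset(&rxconf, 0, sizeof(rxconf));
	memset(rx_seg, 0, sizeof(rx_seg));

	/* First segment: up to 128 bytes of each packet from hdr_pool. */
	rx_seg[0].split.mp = hdr_pool;
	rx_seg[0].split.length = 128;
	/* Second segment: the rest, buffer size deduced from pay_pool. */
	rx_seg[1].split.mp = pay_pool;
	rx_seg[1].split.length = 0;

	rxconf.offloads = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
	rxconf.rx_seg = rx_seg;
	rxconf.rx_nseg = 2;

	/* mb_pool must be NULL when the segment descriptions are used. */
	return rte_eth_rx_queue_setup(port_id, queue_id, nb_rx_desc,
				      rte_eth_dev_socket_id(port_id),
				      &rxconf, NULL);
}
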