X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_ethdev%2Frte_ethdev.h;h=e341a08817e0a64326e14862e11de22708ddf874;hb=2ca75c65af4ca9c836ca0b4eabec69bde05fbed6;hp=eb6cd01f8dc550868bec6439fa6f84fe1c62c722;hpb=fa1f5fe4d87b54075f515374d41663e3ef150f7f;p=dpdk.git diff --git a/lib/librte_ethdev/rte_ethdev.h b/lib/librte_ethdev/rte_ethdev.h index eb6cd01f8d..e341a08817 100644 --- a/lib/librte_ethdev/rte_ethdev.h +++ b/lib/librte_ethdev/rte_ethdev.h @@ -253,6 +253,7 @@ struct rte_eth_stats { uint64_t ierrors; /**< Total number of erroneous received packets. */ uint64_t oerrors; /**< Total number of failed transmitted packets. */ uint64_t rx_nombuf; /**< Total number of RX mbuf allocation failures. */ + /* Queue stats are limited to max 256 queues */ uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]; /**< Total number of queue RX packets. */ uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]; @@ -323,6 +324,7 @@ struct rte_eth_link { #define ETH_LINK_UP 1 /**< Link is up (see link_status). */ #define ETH_LINK_FIXED 0 /**< No autonegotiation (see link_autoneg). */ #define ETH_LINK_AUTONEG 1 /**< Autonegotiated (see link_autoneg). */ +#define RTE_ETH_LINK_MAX_STR_LEN 40 /**< Max length of default link string. */ /** * A structure used to configure the ring threshold registers of an RX/TX @@ -553,6 +555,36 @@ struct rte_eth_rss_conf { #define RTE_ETH_RSS_L3_PRE64 (1ULL << 53) #define RTE_ETH_RSS_L3_PRE96 (1ULL << 52) +/* + * Use the following macros to combine with the above layers + * to choose inner and outer layers or both for RSS computation. + * Bits 50 and 51 are reserved for this. + */ + +/** + * level 0, requests the default behavior. + * Depending on the packet type, it can mean outermost, innermost, + * anything in between or even no RSS. + * It basically stands for the innermost encapsulation level RSS + * can be performed on according to PMD and device capabilities. + */ +#define ETH_RSS_LEVEL_PMD_DEFAULT (0ULL << 50) + +/** + * level 1, requests RSS to be performed on the outermost packet + * encapsulation level. + */ +#define ETH_RSS_LEVEL_OUTERMOST (1ULL << 50) + +/** + * level 2, requests RSS to be performed on the specified inner packet + * encapsulation level, from outermost to innermost (lower to higher values). + */ +#define ETH_RSS_LEVEL_INNERMOST (2ULL << 50) +#define ETH_RSS_LEVEL_MASK (3ULL << 50) + +#define ETH_RSS_LEVEL(rss_hf) ((rss_hf & ETH_RSS_LEVEL_MASK) >> 50) + /** * For input set change of hash filter, if SRC_ONLY and DST_ONLY of * the same level are used simultaneously, it is the same case as @@ -938,6 +970,61 @@ struct rte_eth_txmode { void *reserved_ptrs[2]; /**< Reserved for future fields */ }; +/** + * @warning + * @b EXPERIMENTAL: this structure may change without prior notice. + * + * A structure used to configure an Rx packet segment to split. + * + * If RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT flag is set in offloads field, + * the PMD will split the received packets into multiple segments + * according to the specification in the description array: + * + * - The first network buffer will be allocated from the memory pool, + * specified in the first array element, the second buffer, from the + * pool in the second element, and so on. + * + * - The offsets from the segment description elements specify + * the data offset from the buffer beginning except the first mbuf. + * The first segment offset is added with RTE_PKTMBUF_HEADROOM. + * + * - The lengths in the elements define the maximal data amount + * being received to each segment. 
+ * The receiving starts with filling
+ * up the first mbuf data buffer up to the specified length. If there
+ * are data remaining (the packet is longer than the buffer in the first
+ * mbuf), the following data will be pushed to the next segment
+ * up to its own length, and so on.
+ *
+ * - If the length in the segment description element is zero
+ * the actual buffer size will be deduced from the appropriate
+ * memory pool properties.
+ *
+ * - If there are not enough elements to describe the buffer for an entire
+ * packet of maximal length, the following parameters will be used
+ * for all the remaining segments:
+ * - pool from the last valid element
+ * - the buffer size from this pool
+ * - zero offset
+ */
+struct rte_eth_rxseg_split {
+ struct rte_mempool *mp; /**< Memory pool to allocate segment from. */
+ uint16_t length; /**< Segment data length, configures split point. */
+ uint16_t offset; /**< Data offset from beginning of mbuf data buffer. */
+ uint32_t reserved; /**< Reserved field. */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice.
+ *
+ * A common structure used to describe Rx packet segment properties.
+ */
+union rte_eth_rxseg {
+ /* The settings for buffer split offload. */
+ struct rte_eth_rxseg_split split;
+ /* The settings for other features should be added here. */
+};
+
 /**
 * A structure used to configure an RX ring of an Ethernet port.
 */
@@ -946,12 +1033,21 @@ struct rte_eth_rxconf {
 uint16_t rx_free_thresh; /**< Drives the freeing of RX descriptors. */
 uint8_t rx_drop_en; /**< Drop packets if no descriptors are available. */
 uint8_t rx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
+ uint16_t rx_nseg; /**< Number of segment descriptions in the rx_seg array. */
 /**
 * Per-queue Rx offloads to be set using DEV_RX_OFFLOAD_* flags.
 * Only offloads set on rx_queue_offload_capa or rx_offload_capa
 * fields on rte_eth_dev_info structure are allowed to be set.
 */
 uint64_t offloads;
+ /**
+ * Points to the array of segment descriptions for an entire packet.
+ * Array elements are properties for consecutive Rx segments.
+ *
+ * The supported capabilities of receiving segmentation are reported
+ * in the rte_eth_dev_info.rx_seg_capa field.
+ */
+ union rte_eth_rxseg *rx_seg;
 
 uint64_t reserved_64s[2]; /**< Reserved for future fields */
 void *reserved_ptrs[2]; /**< Reserved for future fields */
@@ -1014,7 +1110,32 @@ struct rte_eth_hairpin_peer {
 * A structure used to configure hairpin binding.
 */
 struct rte_eth_hairpin_conf {
- uint16_t peer_count; /**< The number of peers. */
+ uint32_t peer_count:16; /**< The number of peers. */
+
+ /**
+ * Explicit Tx flow rule mode.
+ * One hairpin pair of queues should have the same attribute.
+ *
+ * - When set, the user is responsible for inserting the hairpin
+ * Tx part flows and removing them.
+ * - When clear, the PMD will try to handle the Tx part of the flows,
+ * e.g., by splitting one flow into two parts.
+ */
+ uint32_t tx_explicit:1;
+
+ /**
+ * Manually bind hairpin queues.
+ * One hairpin pair of queues should have the same attribute.
+ *
+ * - When set, to enable hairpin, the user should call the hairpin bind
+ * function after all the queues are set up properly and the ports are
+ * started. Also, the hairpin unbind function should be called
+ * accordingly before stopping a port that has hairpin configured.
+ * - When clear, the PMD will try to enable the hairpin with the queues
+ * configured automatically during port start.
+ */ + uint32_t manual_bind:1; + uint32_t reserved:14; /**< Reserved bits. */ struct rte_eth_hairpin_peer peers[RTE_ETH_MAX_HAIRPIN_PEERS]; }; @@ -1229,6 +1350,7 @@ struct rte_eth_conf { #define DEV_RX_OFFLOAD_SCTP_CKSUM 0x00020000 #define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM 0x00040000 #define DEV_RX_OFFLOAD_RSS_HASH 0x00080000 +#define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT 0x00100000 #define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \ DEV_RX_OFFLOAD_UDP_CKSUM | \ @@ -1344,6 +1466,21 @@ struct rte_eth_switch_info { */ }; +/** + * @warning + * @b EXPERIMENTAL: this structure may change without prior notice. + * + * Ethernet device Rx buffer segmentation capabilities. + */ +struct rte_eth_rxseg_capa { + __extension__ + uint32_t multi_pools:1; /**< Supports receiving to multiple pools.*/ + uint32_t offset_allowed:1; /**< Supports buffer offsets. */ + uint32_t offset_align_log2:4; /**< Required offset alignment. */ + uint16_t max_nseg; /**< Maximum amount of segments to split. */ + uint16_t reserved; /**< Reserved field. */ +}; + /** * Ethernet device information */ @@ -1372,6 +1509,7 @@ struct rte_eth_dev_info { /** Maximum number of hash MAC addresses for MTA and UTA. */ uint16_t max_vfs; /**< Maximum number of VFs. */ uint16_t max_vmdq_pools; /**< Maximum number of VMDq pools. */ + struct rte_eth_rxseg_capa rx_seg_capa; /**< Segmentation capability.*/ uint64_t rx_offload_capa; /**< All RX offload capabilities including all per-queue ones */ uint64_t tx_offload_capa; @@ -1421,6 +1559,7 @@ struct rte_eth_rxq_info { struct rte_eth_rxconf conf; /**< queue config parameters. */ uint8_t scattered_rx; /**< scattered packets RX supported. */ uint16_t nb_desc; /**< configured number of RXDs. */ + uint16_t rx_buf_size; /**< hardware receive buffer size. */ } __rte_cache_min_aligned; /** @@ -1475,6 +1614,13 @@ struct rte_eth_xstat { * An array of this structure is returned by rte_eth_xstats_get_names(). * It lists the names of extended statistics for a PMD. The *rte_eth_xstat* * structure references these names by their array index. + * + * The xstats should follow a common naming scheme. + * Some names are standardized in rte_stats_strings. + * Examples: + * - rx_missed_errors + * - tx_q3_bytes + * - tx_size_128_to_255_packets */ struct rte_eth_xstat_name { char name[RTE_ETH_XSTATS_NAME_SIZE]; /**< The statistic name. */ @@ -1490,13 +1636,13 @@ struct rte_eth_xstat_name { struct rte_eth_dcb_tc_queue_mapping { /** rx queues assigned to tc per Pool */ struct { - uint8_t base; - uint8_t nb_queue; + uint16_t base; + uint16_t nb_queue; } tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS]; /** rx queues assigned to tc per Pool */ struct { - uint8_t base; - uint8_t nb_queue; + uint16_t base; + uint16_t nb_queue; } tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS]; }; @@ -1512,6 +1658,29 @@ struct rte_eth_dcb_info { struct rte_eth_dcb_tc_queue_mapping tc_queue; }; +/** + * This enum indicates the possible Forward Error Correction (FEC) modes + * of an ethdev port. 
+ */
+enum rte_eth_fec_mode {
+ RTE_ETH_FEC_NOFEC = 0, /**< FEC is off */
+ RTE_ETH_FEC_AUTO, /**< FEC autonegotiation modes */
+ RTE_ETH_FEC_BASER, /**< FEC using common algorithm */
+ RTE_ETH_FEC_RS, /**< FEC using RS algorithm */
+};
+
+/* Translate from FEC mode to FEC capa */
+#define RTE_ETH_FEC_MODE_TO_CAPA(x) (1U << (x))
+
+/* This macro indicates FEC capa mask */
+#define RTE_ETH_FEC_MODE_CAPA_MASK(x) (1U << (RTE_ETH_FEC_ ## x))
+
+/* A structure used to get capabilities per link speed */
+struct rte_eth_fec_capa {
+ uint32_t speed; /**< Link speed (see ETH_SPEED_NUM_*) */
+ uint32_t capa; /**< FEC capabilities bitmask */
+};
+
 #define RTE_ETH_ALL RTE_MAX_ETHPORTS
 
 /* Macros to check for valid port */
@@ -1622,11 +1791,8 @@ struct rte_eth_dev_owner {
 char name[RTE_ETH_MAX_OWNER_NAME_LEN]; /**< The owner name. */
 };
 
-/**
- * Port is released (i.e. totally freed and data erased) on close.
- * Temporary flag for PMD migration to new rte_eth_dev_close() behaviour.
- */
-#define RTE_ETH_DEV_CLOSE_REMOVE 0x0001
+/** PMD supports thread-safe flow operations */
+#define RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE 0x0001
 /** Device supports link state interrupt */
 #define RTE_ETH_DEV_INTR_LSC 0x0002
 /** Device is a bonded slave */
@@ -1637,6 +1803,11 @@ struct rte_eth_dev_owner {
 #define RTE_ETH_DEV_REPRESENTOR 0x0010
 /** Device does not support MAC change after started */
 #define RTE_ETH_DEV_NOLIVE_MAC_ADDR 0x0020
+/**
+ * Queue xstats filled automatically by the ethdev layer.
+ * PMDs filling the queue xstats themselves should not set this flag.
+ */
+#define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS 0x0040
 
 /**
 * Iterates over valid ethdev ports owned by a specific owner.
@@ -1724,8 +1895,7 @@ rte_eth_find_next_of(uint16_t port_id_start,
 */
 __rte_experimental
 uint16_t
-rte_eth_find_next_sibling(uint16_t port_id_start,
- uint16_t ref_port_id);
+rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id);
 
 /**
 * Macro to iterate over all ethdev ports sharing the same rte_device
@@ -1971,12 +2141,25 @@ rte_eth_dev_is_removed(uint16_t port_id);
 * No need to repeat any bit in rx_conf->offloads which has already been
 * enabled in rte_eth_dev_configure() at port level. An offloading enabled
 * at port level can't be disabled at queue level.
+ * The configuration structure also contains a pointer to the array
+ * of Rx buffer segment descriptions, see the rx_seg and rx_nseg
+ * fields. This extended configuration is used by split offloads like
+ * RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT. If mb_pool is not NULL,
+ * the extended configuration fields must be set to NULL and zero.
 * @param mb_pool
 * The pointer to the memory pool from which to allocate *rte_mbuf* network
-* memory buffers to populate each descriptor of the receive ring.
+* memory buffers to populate each descriptor of the receive ring. There are
+* two options to provide Rx buffer configuration:
+* - single pool:
+* mb_pool is not NULL, rx_conf.rx_nseg is 0.
+* - multiple segments description:
+* mb_pool is NULL, rx_conf.rx_seg is not NULL, rx_conf.rx_nseg is not 0.
+* Used only if the flag RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT is set in offloads.
+*
 * @return
 * - 0: Success, receive queue correctly set up.
 * - -EIO: if device is removed.
+* - -ENODEV: if *port_id* is invalid.
 * - -EINVAL: The memory pool pointer is null or the size of network buffers
 * which can be allocated from this memory pool does not fit the various
 * buffer sizes allowed by the device controller.
@@ -2011,6 +2194,7 @@ int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, * * @return * - (0) if successful. + * - (-ENODEV) if *port_id* is invalid. * - (-ENOTSUP) if hardware doesn't support. * - (-EINVAL) if bad parameter. * - (-ENOMEM) if unable to allocate the resources. @@ -2092,6 +2276,7 @@ int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, * * @return * - (0) if successful. + * - (-ENODEV) if *port_id* is invalid. * - (-ENOTSUP) if hardware doesn't support. * - (-EINVAL) if bad parameter. * - (-ENOMEM) if unable to allocate the resources. @@ -2101,6 +2286,88 @@ int rte_eth_tx_hairpin_queue_setup (uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, const struct rte_eth_hairpin_conf *conf); +/** + * @warning + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + * Get all the hairpin peer Rx / Tx ports of the current port. + * The caller should ensure that the array is large enough to save the ports + * list. + * + * @param port_id + * The port identifier of the Ethernet device. + * @param peer_ports + * Pointer to the array to store the peer ports list. + * @param len + * Length of the array to store the port identifiers. + * @param direction + * Current port to peer port direction + * positive - current used as Tx to get all peer Rx ports. + * zero - current used as Rx to get all peer Tx ports. + * + * @return + * - (0 or positive) actual peer ports number. + * - (-EINVAL) if bad parameter. + * - (-ENODEV) if *port_id* invalid + * - (-ENOTSUP) if hardware doesn't support. + * - Others detailed errors from PMD drivers. + */ +__rte_experimental +int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, + size_t len, uint32_t direction); + +/** + * @warning + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + * Bind all hairpin Tx queues of one port to the Rx queues of the peer port. + * It is only allowed to call this function after all hairpin queues are + * configured properly and the devices are in started state. + * + * @param tx_port + * The identifier of the Tx port. + * @param rx_port + * The identifier of peer Rx port. + * RTE_MAX_ETHPORTS is allowed for the traversal of all devices. + * Rx port ID could have the same value as Tx port ID. + * + * @return + * - (0) if successful. + * - (-ENODEV) if Tx port ID is invalid. + * - (-EBUSY) if device is not in started state. + * - (-ENOTSUP) if hardware doesn't support. + * - Others detailed errors from PMD drivers. + */ +__rte_experimental +int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port); + +/** + * @warning + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + * Unbind all hairpin Tx queues of one port from the Rx queues of the peer port. + * This should be called before closing the Tx or Rx devices, if the bind + * function is called before. + * After unbinding the hairpin ports pair, it is allowed to bind them again. + * Changing queues configuration should be after stopping the device(s). + * + * @param tx_port + * The identifier of the Tx port. + * @param rx_port + * The identifier of peer Rx port. + * RTE_MAX_ETHPORTS is allowed for traversal of all devices. + * Rx port ID could have the same value as Tx port ID. + * + * @return + * - (0) if successful. + * - (-ENODEV) if Tx port ID is invalid. + * - (-EBUSY) if device is in stopped state. + * - (-ENOTSUP) if hardware doesn't support. + * - Others detailed errors from PMD drivers. 
+ */
+__rte_experimental
+int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port);
+
 /**
 * Return the NUMA socket to which an Ethernet device is connected
 *
@@ -2136,7 +2403,8 @@ int rte_eth_dev_is_valid_port(uint16_t port_id);
 * to rte_eth_dev_configure().
 * @return
 * - 0: Success, the receive queue is started.
- * - -EINVAL: The port_id or the queue_id out of range or belong to hairpin.
+ * - -ENODEV: if *port_id* is invalid.
+ * - -EINVAL: The queue_id is out of range or the queue belongs to hairpin.
 * - -EIO: if device is removed.
 * - -ENOTSUP: The function not supported in PMD driver.
 */
@@ -2153,7 +2421,8 @@ int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
 * to rte_eth_dev_configure().
 * @return
 * - 0: Success, the receive queue is stopped.
- * - -EINVAL: The port_id or the queue_id out of range or belong to hairpin.
+ * - -ENODEV: if *port_id* is invalid.
+ * - -EINVAL: The queue_id is out of range or the queue belongs to hairpin.
 * - -EIO: if device is removed.
 * - -ENOTSUP: The function not supported in PMD driver.
 */
@@ -2171,7 +2440,8 @@ int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
 * to rte_eth_dev_configure().
 * @return
 * - 0: Success, the transmit queue is started.
- * - -EINVAL: The port_id or the queue_id out of range or belong to hairpin.
+ * - -ENODEV: if *port_id* is invalid.
+ * - -EINVAL: The queue_id is out of range or the queue belongs to hairpin.
 * - -EIO: if device is removed.
 * - -ENOTSUP: The function not supported in PMD driver.
 */
@@ -2188,7 +2458,8 @@ int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
 * to rte_eth_dev_configure().
 * @return
 * - 0: Success, the transmit queue is stopped.
- * - -EINVAL: The port_id or the queue_id out of range or belong to hairpin.
+ * - -ENODEV: if *port_id* is invalid.
+ * - -EINVAL: The queue_id is out of range or the queue belongs to hairpin.
 * - -EIO: if device is removed.
 * - -ENOTSUP: The function not supported in PMD driver.
 */
@@ -2221,8 +2492,11 @@ int rte_eth_dev_start(uint16_t port_id);
 *
 * @param port_id
 * The port identifier of the Ethernet device.
+ * @return
+ * - 0: Success, Ethernet device stopped.
+ * - <0: Error code of the driver device stop function.
 */
-void rte_eth_dev_stop(uint16_t port_id);
+int rte_eth_dev_stop(uint16_t port_id);
 
 /**
 * Link up an Ethernet device.
 *
@@ -2251,13 +2525,15 @@ int rte_eth_dev_set_link_down(uint16_t port_id);
 
 /**
 * Close a stopped Ethernet device. The device cannot be restarted!
- * The function frees all port resources if the driver supports
- * the flag RTE_ETH_DEV_CLOSE_REMOVE.
+ * The function frees all port resources.
 *
 * @param port_id
 * The port identifier of the Ethernet device.
+ * @return
+ * - Zero if the port is closed successfully.
+ * - Negative if something went wrong.
 */
-void rte_eth_dev_close(uint16_t port_id);
+int rte_eth_dev_close(uint16_t port_id);
 
 /**
 * Reset a Ethernet device and keep its port id.
 *
@@ -2289,7 +2565,7 @@ void rte_eth_dev_close(uint16_t port_id);
 *
 * @return
 * - (0) if successful.
- * - (-EINVAL) if port identifier is invalid.
+ * - (-ENODEV) if *port_id* is invalid.
 * - (-ENOTSUP) if hardware doesn't support this function.
 * - (-EPERM) if not ran from the primary process.
 * - (-EIO) if re-initialisation failed or device is removed.
@@ -2407,6 +2683,44 @@ int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link);
 */
 int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link);
 
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * The function converts a link_speed to a string. It handles all special
+ * values like unknown or none speed.
+ *
+ * @param link_speed
+ * link_speed of rte_eth_link struct
+ * @return
+ * Link speed in textual format. It is a pointer to immutable memory.
+ * No free is required.
+ */
+__rte_experimental
+const char *rte_eth_link_speed_to_str(uint32_t link_speed);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * The function converts a rte_eth_link struct representing a link status to
+ * a string.
+ *
+ * @param str
+ * A pointer to a string to be filled with textual representation of
+ * device status. At least RTE_ETH_LINK_MAX_STR_LEN bytes should be
+ * allocated to store the default link status text.
+ * @param len
+ * Length of available memory at 'str' string.
+ * @param eth_link
+ * Link status returned by rte_eth_link_get function
+ * @return
+ * Number of bytes written to str array.
+ */
+__rte_experimental
+int rte_eth_link_to_str(char *str, size_t len,
+ const struct rte_eth_link *eth_link);
+
 /**
 * Retrieve the general I/O statistics of an Ethernet device.
 *
@@ -2606,6 +2920,7 @@ int rte_eth_xstats_reset(uint16_t port_id);
 * The per-queue packet statistics functionality number that the transmit
 * queue is to be assigned.
 * The value must be in the range [0, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1].
+ * The maximum value of RTE_ETHDEV_QUEUE_STAT_CNTRS is 256.
 * @return
 * Zero if successful. Non-zero otherwise.
 */
@@ -2626,6 +2941,7 @@ int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id,
 * The per-queue packet statistics functionality number that the receive
 * queue is to be assigned.
 * The value must be in the range [0, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1].
+ * The maximum value of RTE_ETHDEV_QUEUE_STAT_CNTRS is 256.
 * @return
 * Zero if successful. Non-zero otherwise.
 */
@@ -3327,6 +3643,88 @@ int rte_eth_led_on(uint16_t port_id);
 */
 int rte_eth_led_off(uint16_t port_id);
 
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * Get Forward Error Correction (FEC) capability.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param speed_fec_capa
+ * speed_fec_capa is out only with per-speed capabilities.
+ * If set to NULL, the function returns the required number
+ * of array entries.
+ * @param num
+ * The number of elements in the speed_fec_capa array.
+ *
+ * @return
+ * - A non-negative value lower or equal to num: success. The return value
+ * is the number of entries filled in the FEC capa array.
+ * - A non-negative value higher than num: error, the given FEC capa array
+ * is too small. The return value corresponds to the num that should
+ * be given to succeed. The entries in the FEC capa array are not valid
+ * and shall not be used by the caller.
+ * - (-ENOTSUP) if underlying hardware OR driver doesn't support
+ * that operation.
+ * - (-EIO) if device is removed.
+ * - (-ENODEV) if *port_id* is invalid.
+ * - (-EINVAL) if *num* or *speed_fec_capa* is invalid.
+ */
+__rte_experimental
+int rte_eth_fec_get_capability(uint16_t port_id,
+ struct rte_eth_fec_capa *speed_fec_capa,
+ unsigned int num);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * Get the current Forward Error Correction (FEC) mode.
+ * If the link is down and AUTO is enabled, AUTO is returned; otherwise,
+ * the configured FEC mode is returned.
+ * If the link is up, the current FEC mode is returned.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param fec_capa + * A bitmask of enabled FEC modes. If AUTO bit is set, other + * bits specify FEC modes which may be negotiated. If AUTO + * bit is clear, specify FEC modes to be used (only one valid + * mode per speed may be set). + * @return + * - (0) if successful. + * - (-ENOTSUP) if underlying hardware OR driver doesn't support. + * that operation. + * - (-EIO) if device is removed. + * - (-ENODEV) if *port_id* invalid. + */ +__rte_experimental +int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa); + +/** + * @warning + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + * Set Forward Error Correction(FEC) mode. + * + * @param port_id + * The port identifier of the Ethernet device. + * @param fec_capa + * A bitmask of allowed FEC modes. If AUTO bit is set, other + * bits specify FEC modes which may be negotiated. If AUTO + * bit is clear, specify FEC modes to be used (only one valid + * mode per speed may be set). + * @return + * - (0) if successful. + * - (-EINVAL) if the FEC mode is not valid. + * - (-ENOTSUP) if underlying hardware OR driver doesn't support. + * - (-EIO) if device is removed. + * - (-ENODEV) if *port_id* invalid. + */ +__rte_experimental +int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa); + /** * Get current status of the Ethernet link flow control for Ethernet device * @@ -3443,6 +3841,7 @@ int rte_eth_dev_default_mac_addr_set(uint16_t port_id, * rte_eth_dev_info_get(). * @return * - (0) if successful. + * - (-ENODEV) if *port_id* is invalid. * - (-ENOTSUP) if hardware doesn't support. * - (-EINVAL) if bad parameter. * - (-EIO) if device is removed. @@ -3464,6 +3863,7 @@ int rte_eth_dev_rss_reta_update(uint16_t port_id, * rte_eth_dev_info_get(). * @return * - (0) if successful. + * - (-ENODEV) if *port_id* is invalid. * - (-ENOTSUP) if hardware doesn't support. * - (-EINVAL) if bad parameter. * - (-EIO) if device is removed. @@ -3734,7 +4134,8 @@ struct rte_eth_rxtx_callback; * The callback function * @param user_param * A generic pointer parameter which will be passed to each invocation of the - * callback function on this port and queue. + * callback function on this port and queue. Inter-thread synchronization + * of any user data changes is the responsibility of the user. * * @return * NULL on error. @@ -3763,7 +4164,8 @@ rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, * The callback function * @param user_param * A generic pointer parameter which will be passed to each invocation of the - * callback function on this port and queue. + * callback function on this port and queue. Inter-thread synchronization + * of any user data changes is the responsibility of the user. * * @return * NULL on error. @@ -3791,7 +4193,8 @@ rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, * The callback function * @param user_param * A generic pointer parameter which will be passed to each invocation of the - * callback function on this port and queue. + * callback function on this port and queue. Inter-thread synchronization + * of any user data changes is the responsibility of the user. * * @return * NULL on error. @@ -3816,7 +4219,9 @@ rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, * on that queue. * * - After a short delay - where the delay is sufficient to allow any - * in-flight callbacks to complete. + * in-flight callbacks to complete. Alternately, the RCU mechanism can be + * used to detect when data plane threads have ceased referencing the + * callback memory. 
* * @param port_id * The port identifier of the Ethernet device. @@ -3827,8 +4232,9 @@ rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, * * @return * - 0: Success. Callback was removed. + * - -ENODEV: If *port_id* is invalid. * - -ENOTSUP: Callback support is not available. - * - -EINVAL: The port_id or the queue_id is out of range, or the callback + * - -EINVAL: The queue_id is out of range, or the callback * is NULL or not found for the port/queue. */ int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, @@ -3849,7 +4255,9 @@ int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, * on that queue. * * - After a short delay - where the delay is sufficient to allow any - * in-flight callbacks to complete. + * in-flight callbacks to complete. Alternately, the RCU mechanism can be + * used to detect when data plane threads have ceased referencing the + * callback memory. * * @param port_id * The port identifier of the Ethernet device. @@ -3860,8 +4268,9 @@ int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, * * @return * - 0: Success. Callback was removed. + * - -ENODEV: If *port_id* is invalid. * - -ENOTSUP: Callback support is not available. - * - -EINVAL: The port_id or the queue_id is out of range, or the callback + * - -EINVAL: The queue_id is out of range, or the callback * is NULL or not found for the port/queue. */ int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, @@ -3881,8 +4290,9 @@ int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, * * @return * - 0: Success + * - -ENODEV: If *port_id* is invalid. * - -ENOTSUP: routine is not supported by the device PMD. - * - -EINVAL: The port_id or the queue_id is out of range, or the queue + * - -EINVAL: The queue_id is out of range, or the queue * is hairpin queue. */ int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, @@ -3902,8 +4312,9 @@ int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, * * @return * - 0: Success + * - -ENODEV: If *port_id* is invalid. * - -ENOTSUP: routine is not supported by the device PMD. - * - -EINVAL: The port_id or the queue_id is out of range, or the queue + * - -EINVAL: The queue_id is out of range, or the queue * is hairpin queue. */ int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, @@ -3923,8 +4334,9 @@ int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, * * @return * - 0: Success + * - -ENODEV: If *port_id* is invalid. * - -ENOTSUP: routine is not supported by the device PMD. - * - -EINVAL: The port_id or the queue_id is out of range. + * - -EINVAL: The queue_id is out of range. */ __rte_experimental int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, @@ -3944,8 +4356,9 @@ int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, * * @return * - 0: Success + * - -ENODEV: If *port_id* is invalid. * - -ENOTSUP: routine is not supported by the device PMD. - * - -EINVAL: The port_id or the queue_id is out of range. + * - -EINVAL: The queue_id is out of range. */ __rte_experimental int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, @@ -4334,6 +4747,7 @@ rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id); * Buffer of size RTE_ETH_NAME_MAX_LEN to store the name. * @return * - (0) if successful. +* - (-ENODEV) if *port_id* is invalid. * - (-EINVAL) on failure. 
*/ int @@ -4510,10 +4924,18 @@ rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, rx_pkts, nb_pkts); #ifdef RTE_ETHDEV_RXTX_CALLBACKS - if (unlikely(dev->post_rx_burst_cbs[queue_id] != NULL)) { - struct rte_eth_rxtx_callback *cb = - dev->post_rx_burst_cbs[queue_id]; + struct rte_eth_rxtx_callback *cb; + /* __ATOMIC_RELEASE memory order was used when the + * call back was inserted into the list. + * Since there is a clear dependency between loading + * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is + * not required. + */ + cb = __atomic_load_n(&dev->post_rx_burst_cbs[queue_id], + __ATOMIC_RELAXED); + + if (unlikely(cb != NULL)) { do { nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx, nb_pkts, cb->param); @@ -4535,7 +4957,8 @@ rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, * The queue id on the specific port. * @return * The number of used descriptors in the specific queue, or: - * (-EINVAL) if *port_id* or *queue_id* is invalid + * - (-ENODEV) if *port_id* is invalid. + * (-EINVAL) if *queue_id* is invalid * (-ENOTSUP) if the device does not support this function */ static inline int @@ -4543,13 +4966,14 @@ rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id) { struct rte_eth_dev *dev; - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP); - if (queue_id >= dev->data->nb_rx_queues) + RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_queue_count, -ENOTSUP); + if (queue_id >= dev->data->nb_rx_queues || + dev->data->rx_queues[queue_id] == NULL) return -EINVAL; - return (int)(*dev->dev_ops->rx_queue_count)(dev, queue_id); + return (int)(*dev->rx_queue_count)(dev, queue_id); } /** @@ -4573,9 +4997,8 @@ rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset) { struct rte_eth_dev *dev = &rte_eth_devices[port_id]; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP); - return (*dev->dev_ops->rx_descriptor_done)( \ - dev->data->rx_queues[queue_id], offset); + RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_descriptor_done, -ENOTSUP); + return (*dev->rx_descriptor_done)(dev->data->rx_queues[queue_id], offset); } #define RTE_ETH_RX_DESC_AVAIL 0 /**< Desc available for hw. */ @@ -4630,10 +5053,10 @@ rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id, if (queue_id >= dev->data->nb_rx_queues) return -ENODEV; #endif - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_status, -ENOTSUP); + RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_descriptor_status, -ENOTSUP); rxq = dev->data->rx_queues[queue_id]; - return (*dev->dev_ops->rx_descriptor_status)(rxq, offset); + return (*dev->rx_descriptor_status)(rxq, offset); } #define RTE_ETH_TX_DESC_FULL 0 /**< Desc filled for hw, waiting xmit. 
*/ @@ -4687,10 +5110,10 @@ static inline int rte_eth_tx_descriptor_status(uint16_t port_id, if (queue_id >= dev->data->nb_tx_queues) return -ENODEV; #endif - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_descriptor_status, -ENOTSUP); + RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_descriptor_status, -ENOTSUP); txq = dev->data->tx_queues[queue_id]; - return (*dev->dev_ops->tx_descriptor_status)(txq, offset); + return (*dev->tx_descriptor_status)(txq, offset); } /** @@ -4776,7 +5199,16 @@ rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, #endif #ifdef RTE_ETHDEV_RXTX_CALLBACKS - struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id]; + struct rte_eth_rxtx_callback *cb; + + /* __ATOMIC_RELEASE memory order was used when the + * call back was inserted into the list. + * Since there is a clear dependency between loading + * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is + * not required. + */ + cb = __atomic_load_n(&dev->pre_tx_burst_cbs[queue_id], + __ATOMIC_RELAXED); if (unlikely(cb != NULL)) { do { @@ -4842,6 +5274,7 @@ rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, * meet devices requirements with rte_errno set appropriately: * - EINVAL: offload flags are not correctly set * - ENOTSUP: the offload feature is not supported by the hardware + * - ENODEV: if *port_id* is invalid (with debug enabled only) * */ @@ -4856,7 +5289,7 @@ rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id, #ifdef RTE_LIBRTE_ETHDEV_DEBUG if (!rte_eth_dev_is_valid_port(port_id)) { RTE_ETHDEV_LOG(ERR, "Invalid TX port_id=%u\n", port_id); - rte_errno = EINVAL; + rte_errno = ENODEV; return 0; } #endif
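The following sketch is not part of the diff above; it only illustrates how the Rx buffer split offload introduced by this change might be configured through rte_eth_rx_queue_setup(). The helper name setup_split_rxq, the mempool arguments and the 128-byte header split length are assumptions made for illustration; the PMD's reported rte_eth_dev_info.rx_seg_capa should be consulted before relying on any of them.

#include <errno.h>
#include <string.h>
#include <rte_ethdev.h>

static int
setup_split_rxq(uint16_t port_id, uint16_t queue_id, uint16_t nb_desc,
		struct rte_mempool *hdr_pool, struct rte_mempool *pay_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxconf;
	union rte_eth_rxseg rx_seg[2];
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;
	/* The PMD must report support for at least two split segments. */
	if (dev_info.rx_seg_capa.max_nseg < 2)
		return -ENOTSUP;

	memset(rx_seg, 0, sizeof(rx_seg));
	/* First segment: up to 128 bytes (e.g. headers) taken from hdr_pool. */
	rx_seg[0].split.mp = hdr_pool;
	rx_seg[0].split.length = 128;
	rx_seg[0].split.offset = 0;
	/* Second segment: remaining data, buffer size deduced from pay_pool. */
	rx_seg[1].split.mp = pay_pool;
	rx_seg[1].split.length = 0;
	rx_seg[1].split.offset = 0;

	memset(&rxconf, 0, sizeof(rxconf));
	rxconf.offloads = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
	rxconf.rx_seg = rx_seg;
	rxconf.rx_nseg = 2;

	/* mb_pool is NULL: the segment descriptions provide the pools. */
	return rte_eth_rx_queue_setup(port_id, queue_id, nb_desc,
				      rte_eth_dev_socket_id(port_id),
				      &rxconf, NULL);
}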