X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_mbuf%2Frte_mbuf.h;h=323a1ac1632c170344b6dec7dd693926777ef54d;hb=97cb466d65c9;hp=d5895eae2783997a2b82426b1671f9c6c0ea10d1;hpb=0d57ac4a34cd46fe255bf7f19c0c8c05685cd974;p=dpdk.git

diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index d5895eae27..323a1ac163 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -44,7 +44,14 @@
  * buffers. The message buffers are stored in a mempool, using the
  * RTE mempool library.
  *
- * This library provide an API to allocate/free packet mbufs, which are
+ * The preferred way to create a mbuf pool is to use
+ * rte_pktmbuf_pool_create(). However, in some situations, an
+ * application may want to have more control (e.g. populate the pool with
+ * specific memory); in this case it is possible to use functions from
+ * rte_mempool. See how rte_pktmbuf_pool_create() is implemented for
+ * details.
+ *
+ * This library provides an API to allocate/free packet mbufs, which are
  * used to carry network packets.
  *
  * To understand the concepts of packet buffers or mbufs, you
@@ -60,53 +67,145 @@
 #include <rte_atomic.h>
 #include <rte_prefetch.h>
 #include <rte_branch_prediction.h>
+#include <rte_mbuf_ptype.h>
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-/* deprecated options */
-#pragma GCC poison RTE_MBUF_SCATTER_GATHER
-#pragma GCC poison RTE_MBUF_REFCNT
-
 /*
  * Packet Offload Features Flags. It also carries packet type information.
  * Critical resources. Both rx/tx share these bits. Be cautious on any change.
  *
  * - RX flags start at bit position zero, and get added to the left of previous
  *   flags.
- * - The most-significant 8 bits are reserved for generic mbuf flags
- * - TX flags therefore start at bit position 55 (i.e. 63-8), and new flags get
- *   added to the right of the previously defined flags
+ * - The most-significant 3 bits are reserved for generic mbuf flags
+ * - TX flags therefore start at bit position 60 (i.e. 63-3), and new flags get
+ *   added to the right of the previously defined flags, i.e. they should count
+ *   downwards, not upwards.
 *
 * Keep these flags synchronized with rte_get_rx_ol_flag_name() and
 * rte_get_tx_ol_flag_name().
 */
-#define PKT_RX_VLAN_PKT (1ULL << 0) /**< RX packet is a 802.1q VLAN packet. */
+
+/**
+ * RX packet is an 802.1q VLAN packet. This flag was set by PMDs when
+ * the packet was recognized as a VLAN, but the behavior between PMDs
+ * was not the same. This flag is kept for some time to avoid breaking
+ * applications and should be replaced by PKT_RX_VLAN_STRIPPED.
+ */
+#define PKT_RX_VLAN_PKT (1ULL << 0)
+
 #define PKT_RX_RSS_HASH (1ULL << 1) /**< RX packet with RSS hash result. */
 #define PKT_RX_FDIR (1ULL << 2) /**< RX packet with FDIR match indicate. */
-#define PKT_RX_L4_CKSUM_BAD (1ULL << 3) /**< L4 cksum of RX pkt. is not OK. */
-#define PKT_RX_IP_CKSUM_BAD (1ULL << 4) /**< IP cksum of RX pkt. is not OK. */
-#define PKT_RX_EIP_CKSUM_BAD (0ULL << 0) /**< External IP header checksum error. */
-#define PKT_RX_OVERSIZE (0ULL << 0) /**< Num of desc of an RX pkt oversize. */
-#define PKT_RX_HBUF_OVERFLOW (0ULL << 0) /**< Header buffer overflow. */
-#define PKT_RX_RECIP_ERR (0ULL << 0) /**< Hardware processing error. */
-#define PKT_RX_MAC_ERR (0ULL << 0) /**< MAC error. */
-#define PKT_RX_IPV4_HDR (1ULL << 5) /**< RX packet with IPv4 header. */
-#define PKT_RX_IPV4_HDR_EXT (1ULL << 6) /**< RX packet with extended IPv4 header. */
-#define PKT_RX_IPV6_HDR (1ULL << 7) /**< RX packet with IPv6 header. */
-#define PKT_RX_IPV6_HDR_EXT (1ULL << 8) /**< RX packet with extended IPv6 header. */
+
+/**
+ * Deprecated.
+ * Checking this flag alone is deprecated: check the 2 bits of
+ * PKT_RX_L4_CKSUM_MASK.
+ * This flag was set when the L4 checksum of a packet was detected as
+ * wrong by the hardware.
+ */
+#define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
+
+/**
+ * Deprecated.
+ * Checking this flag alone is deprecated: check the 2 bits of
+ * PKT_RX_IP_CKSUM_MASK.
+ * This flag was set when the IP checksum of a packet was detected as
+ * wrong by the hardware.
+ */
+#define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
+
+#define PKT_RX_EIP_CKSUM_BAD (1ULL << 5) /**< External IP header checksum error. */
+
+/**
+ * A vlan has been stripped by the hardware and its tci is saved in
+ * mbuf->vlan_tci. This can only happen if vlan stripping is enabled
+ * in the RX configuration of the PMD.
+ */
+#define PKT_RX_VLAN_STRIPPED (1ULL << 6)
+
+/**
+ * Mask of bits used to determine the status of RX IP checksum.
+ * - PKT_RX_IP_CKSUM_UNKNOWN: no information about the RX IP checksum
+ * - PKT_RX_IP_CKSUM_BAD: the IP checksum in the packet is wrong
+ * - PKT_RX_IP_CKSUM_GOOD: the IP checksum in the packet is valid
+ * - PKT_RX_IP_CKSUM_NONE: the IP checksum is not correct in the packet
+ *   data, but the integrity of the IP header is verified.
+ */
+#define PKT_RX_IP_CKSUM_MASK ((1ULL << 4) | (1ULL << 7))
+
+#define PKT_RX_IP_CKSUM_UNKNOWN 0
+#define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
+#define PKT_RX_IP_CKSUM_GOOD (1ULL << 7)
+#define PKT_RX_IP_CKSUM_NONE ((1ULL << 4) | (1ULL << 7))
+
+/**
+ * Mask of bits used to determine the status of RX L4 checksum.
+ * - PKT_RX_L4_CKSUM_UNKNOWN: no information about the RX L4 checksum
+ * - PKT_RX_L4_CKSUM_BAD: the L4 checksum in the packet is wrong
+ * - PKT_RX_L4_CKSUM_GOOD: the L4 checksum in the packet is valid
+ * - PKT_RX_L4_CKSUM_NONE: the L4 checksum is not correct in the packet
+ *   data, but the integrity of the L4 data is verified.
+ */
+#define PKT_RX_L4_CKSUM_MASK ((1ULL << 3) | (1ULL << 8))
+
+#define PKT_RX_L4_CKSUM_UNKNOWN 0
+#define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
+#define PKT_RX_L4_CKSUM_GOOD (1ULL << 8)
+#define PKT_RX_L4_CKSUM_NONE ((1ULL << 3) | (1ULL << 8))
+
 #define PKT_RX_IEEE1588_PTP (1ULL << 9) /**< RX IEEE1588 L2 Ethernet PT Packet. */
 #define PKT_RX_IEEE1588_TMST (1ULL << 10) /**< RX IEEE1588 L2/L4 timestamped packet.*/
-#define PKT_RX_TUNNEL_IPV4_HDR (1ULL << 11) /**< RX tunnel packet with IPv4 header.*/
-#define PKT_RX_TUNNEL_IPV6_HDR (1ULL << 12) /**< RX tunnel packet with IPv6 header. */
 #define PKT_RX_FDIR_ID (1ULL << 13) /**< FD id reported if FDIR match. */
 #define PKT_RX_FDIR_FLX (1ULL << 14) /**< Flexible bytes reported if FDIR match. */
-#define PKT_RX_QINQ_PKT (1ULL << 15) /**< RX packet with double VLAN stripped. */
+
+/**
+ * The 2 vlans have been stripped by the hardware and their tci are
+ * saved in mbuf->vlan_tci (inner) and mbuf->vlan_tci_outer (outer).
+ * This can only happen if vlan stripping is enabled in the RX
+ * configuration of the PMD. If this flag is set, PKT_RX_VLAN_STRIPPED
+ * must also be set.
+ */
+#define PKT_RX_QINQ_STRIPPED (1ULL << 15)
+
+/**
+ * Deprecated.
+ * RX packet with double VLAN stripped.
+ * This flag is replaced by PKT_RX_QINQ_STRIPPED.
+ */
+#define PKT_RX_QINQ_PKT PKT_RX_QINQ_STRIPPED
+
+/**
+ * When packets are coalesced by a hardware or virtual driver, this flag
+ * can be set in the RX mbuf, meaning that the m->tso_segsz field is
+ * valid and is set to the segment size of original packets.
+ */
+#define PKT_RX_LRO (1ULL << 16)
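
A receive path decodes the two-bit checksum encodings above by comparing against the mask, not by testing single bits. A minimal sketch, not part of rte_mbuf.h itself (the mbuf `m` is assumed to come from an RX burst):

#include <rte_mbuf.h>

/* Returns 1 if the IP header can be trusted, 0 if it is bad,
 * -1 if it must be verified in software. */
static inline int
rx_ip_cksum_ok(const struct rte_mbuf *m)
{
	switch (m->ol_flags & PKT_RX_IP_CKSUM_MASK) {
	case PKT_RX_IP_CKSUM_GOOD:
	case PKT_RX_IP_CKSUM_NONE: /* header integrity already verified */
		return 1;
	case PKT_RX_IP_CKSUM_BAD:
		return 0;
	default: /* PKT_RX_IP_CKSUM_UNKNOWN */
		return -1;
	}
}

The same pattern applies to PKT_RX_L4_CKSUM_MASK and its four states.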
+
 /* add new RX flags here */
 
 /* add new TX flags here */
 
+/**
+ * Offload the MACsec. This flag must be set by the application to enable
+ * this offload feature for a packet to be transmitted.
+ */
+#define PKT_TX_MACSEC (1ULL << 44)
+
+/**
+ * Bits 45:48 are used for the tunnel type.
+ * When doing Tx offload like TSO or checksum, the HW needs to configure the
+ * tunnel type into the HW descriptors.
+ */
+#define PKT_TX_TUNNEL_VXLAN (0x1ULL << 45)
+#define PKT_TX_TUNNEL_GRE (0x2ULL << 45)
+#define PKT_TX_TUNNEL_IPIP (0x3ULL << 45)
+#define PKT_TX_TUNNEL_GENEVE (0x4ULL << 45)
+/* add new TX TUNNEL type here */
+#define PKT_TX_TUNNEL_MASK (0xFULL << 45)
+
 /**
  * Second VLAN insertion (QinQ) flag.
  */
@@ -197,11 +296,31 @@ extern "C" {
  */
 #define PKT_TX_OUTER_IPV6 (1ULL << 60)
 
+/**
+ * Bitmask of all supported packet Tx offload features flags,
+ * which can be set for a packet.
+ */
+#define PKT_TX_OFFLOAD_MASK ( \
+		PKT_TX_IP_CKSUM | \
+		PKT_TX_L4_MASK | \
+		PKT_TX_OUTER_IP_CKSUM | \
+		PKT_TX_TCP_SEG | \
+		PKT_TX_IEEE1588_TMST | \
+		PKT_TX_QINQ_PKT | \
+		PKT_TX_VLAN_PKT | \
+		PKT_TX_TUNNEL_MASK | \
+		PKT_TX_MACSEC)
+
+#define __RESERVED (1ULL << 61) /**< reserved for future mbuf use */
+
 #define IND_ATTACHED_MBUF (1ULL << 62) /**< Indirect attached mbuf */
 
 /* Use final bit of flags to indicate a control mbuf */
 #define CTRL_MBUF_FLAG (1ULL << 63) /**< Mbuf contains control data */
 
+/** Alignment constraint of mbuf private area. */
+#define RTE_MBUF_PRIV_ALIGN 8
+
 /**
  * Get the name of a RX offload flag
  *
@@ -212,6 +331,20 @@ extern "C" {
  */
 const char *rte_get_rx_ol_flag_name(uint64_t mask);
 
+/**
+ * Dump the list of RX offload flags in a buffer
+ *
+ * @param mask
+ *   The mask describing the RX flags.
+ * @param buf
+ *   The output buffer.
+ * @param buflen
+ *   The length of the buffer.
+ * @return
+ *   0 on success, (-1) on error.
+ */
+int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);
+
 /**
  * Get the name of a TX offload flag
  *
@@ -224,6 +357,20 @@ const char *rte_get_rx_ol_flag_name(uint64_t mask);
  */
 const char *rte_get_tx_ol_flag_name(uint64_t mask);
 
+/**
+ * Dump the list of TX offload flags in a buffer
+ *
+ * @param mask
+ *   The mask describing the TX flags.
+ * @param buf
+ *   The output buffer.
+ * @param buflen
+ *   The length of the buffer.
+ * @return
+ *   0 on success, (-1) on error.
+ */
+int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);
+
 /**
  * Some NICs need at least 2KB buffer to RX standard Ethernet frame without
  * splitting it into multiple segments.
  *
@@ -236,8 +383,11 @@ const char *rte_get_tx_ol_flag_name(uint64_t mask);
 
 /* define a set of marker types that can be used to refer to set points in the
  * mbuf */
+__extension__
 typedef void *MARKER[0];   /**< generic marker for a point in a structure */
+__extension__
 typedef uint8_t MARKER8[0];  /**< generic marker with 1B alignment */
+__extension__
 typedef uint64_t MARKER64[0]; /**< marker that allows us to overwrite 8 bytes
                                * with a single assignment */
 
@@ -250,46 +400,64 @@ struct rte_mbuf {
 	void *buf_addr; /**< Virtual address of segment buffer. */
 	phys_addr_t buf_physaddr; /**< Physical address of segment buffer. */
 
-	uint16_t buf_len; /**< Length of segment buffer. */
-
-	/* next 6 bytes are initialised on RX descriptor rearm */
-	MARKER8 rearm_data;
+	/* next 8 bytes are initialised on RX descriptor rearm */
+	MARKER64 rearm_data;
 	uint16_t data_off;
 
 	/**
-	 * 16-bit Reference counter.
+	 * Reference counter. Its size should be at least equal to the size
+	 * of the port field (16 bits), to support zero-copy broadcast.
 	 * It should only be accessed using the following functions:
 	 * rte_mbuf_refcnt_update(), rte_mbuf_refcnt_read(), and
 	 * rte_mbuf_refcnt_set(). The functionality of these functions (atomic,
 	 * or non-atomic) is controlled by the CONFIG_RTE_MBUF_REFCNT_ATOMIC
 	 * config option.
 	 */
+	RTE_STD_C11
 	union {
 		rte_atomic16_t refcnt_atomic; /**< Atomically accessed refcnt */
 		uint16_t refcnt;              /**< Non-atomically accessed refcnt */
 	};
 
-	uint8_t nb_segs; /**< Number of segments. */
-	uint8_t port; /**< Input port. */
+	uint16_t nb_segs; /**< Number of segments. */
+
+	/** Input port (16 bits to support more than 256 virtual ports). */
+	uint16_t port;
 
 	uint64_t ol_flags; /**< Offload features. */
 
 	/* remaining bytes are set on RX when pulling packet from descriptor */
 	MARKER rx_descriptor_fields1;
 
-	/**
-	 * The packet type, which is used to indicate ordinary packet and also
-	 * tunneled packet format, i.e. each number is represented a type of
-	 * packet.
+	/*
+	 * The packet type, which is the combination of outer/inner L2, L3, L4
+	 * and tunnel types. The packet_type is about the data really present in
+	 * the mbuf. Example: if vlan stripping is enabled, a received vlan packet
+	 * would have RTE_PTYPE_L2_ETHER and not RTE_PTYPE_L2_VLAN because the
+	 * vlan is stripped from the data.
 	 */
-	uint16_t packet_type;
+	RTE_STD_C11
+	union {
+		uint32_t packet_type; /**< L2/L3/L4 and tunnel information. */
+		struct {
+			uint32_t l2_type:4; /**< (Outer) L2 type. */
+			uint32_t l3_type:4; /**< (Outer) L3 type. */
+			uint32_t l4_type:4; /**< (Outer) L4 type. */
+			uint32_t tun_type:4; /**< Tunnel type. */
+			uint32_t inner_l2_type:4; /**< Inner L2 type. */
+			uint32_t inner_l3_type:4; /**< Inner L3 type. */
+			uint32_t inner_l4_type:4; /**< Inner L4 type. */
+		};
+	};
 
-	uint16_t data_len; /**< Amount of data in segment buffer. */
 	uint32_t pkt_len; /**< Total pkt len: sum of all segments. */
-	uint16_t vlan_tci; /**< VLAN Tag Control Identifier (CPU order) */
-	uint16_t vlan_tci_outer; /**< Outer VLAN Tag Control Identifier (CPU order) */
+	uint16_t data_len; /**< Amount of data in segment buffer. */
+	/** VLAN TCI (CPU order), valid if PKT_RX_VLAN_STRIPPED is set. */
+	uint16_t vlan_tci;
+
 	union {
 		uint32_t rss; /**< RSS hash result if RSS enabled */
 		struct {
+			RTE_STD_C11
 			union {
 				struct {
 					uint16_t hash;
@@ -302,15 +470,23 @@ struct rte_mbuf {
 					/**< First 4 flexible bytes or FD ID, dependent on
 					     PKT_RX_FDIR_* flag in ol_flags. */
 				} fdir; /**< Filter identifier if FDIR enabled */
-			uint32_t sched; /**< Hierarchical scheduler */
+			struct {
+				uint32_t lo;
+				uint32_t hi;
+			} sched; /**< Hierarchical scheduler */
 			uint32_t usr; /**< User defined tags. See rte_distributor_process() */
 		} hash; /**< hash information */
 
 		uint32_t seqn; /**< Sequence number. See also rte_reorder_insert() */
 
+	/** Outer VLAN TCI (CPU order), valid if PKT_RX_QINQ_STRIPPED is set. */
+	uint16_t vlan_tci_outer;
+
+	uint16_t buf_len; /**< Length of segment buffer. */
 
 	/* second cache line - fields only used in slow path or on TX */
-	MARKER cacheline1 __rte_cache_aligned;
+	MARKER cacheline1 __rte_cache_min_aligned;
 
+	RTE_STD_C11
 	union {
 		void *userdata; /**< Can be used for external metadata */
 		uint64_t udata64; /**< Allow 8-byte userdata on 32-bit */
@@ -320,10 +496,15 @@ struct rte_mbuf {
 	struct rte_mbuf *next; /**< Next segment of scattered packet. */
 
 	/* fields to support TX offloads */
+	RTE_STD_C11
 	union {
 		uint64_t tx_offload; /**< combined for easy fetch */
+		__extension__
 		struct {
-			uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
+			uint64_t l2_len:7;
+			/**< L2 (MAC) Header Length for non-tunneling pkt.
+			 * Outer_L4_len + ... + Inner_L2_len for tunneling pkt.
+			 */
 			uint64_t l3_len:9; /**< L3 (IP) Header Length. */
 			uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
 			uint64_t tso_segsz:16; /**< TCP TSO segment size */
@@ -344,8 +525,78 @@ struct rte_mbuf {
 	uint16_t timesync;
 } __rte_cache_aligned;
 
+/**
+ * Prefetch the first part of the mbuf
+ *
+ * The first 64 bytes of the mbuf correspond to fields that are used early
+ * in the receive path. If the cache line of the architecture is higher than
+ * 64B, the second part will also be prefetched.
+ *
+ * @param m
+ *   The pointer to the mbuf.
+ */
+static inline void
+rte_mbuf_prefetch_part1(struct rte_mbuf *m)
+{
+	rte_prefetch0(&m->cacheline0);
+}
+
+/**
+ * Prefetch the second part of the mbuf
+ *
+ * The next 64 bytes of the mbuf correspond to fields that are used in the
+ * transmit path. If the cache line of the architecture is higher than 64B,
+ * this function does nothing as it is expected that the full mbuf is
+ * already in cache.
+ *
+ * @param m
+ *   The pointer to the mbuf.
+ */
+static inline void
+rte_mbuf_prefetch_part2(struct rte_mbuf *m)
+{
+#if RTE_CACHE_LINE_SIZE == 64
+	rte_prefetch0(&m->cacheline1);
+#else
+	RTE_SET_USED(m);
+#endif
+}
+
+
 static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
 
+/**
+ * Return the DMA address of the beginning of the mbuf data
+ *
+ * @param mb
+ *   The pointer to the mbuf.
+ * @return
+ *   The physical address of the beginning of the mbuf data
+ */
+static inline phys_addr_t
+rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
+{
+	return mb->buf_physaddr + mb->data_off;
+}
+
+/**
+ * Return the default DMA address of the beginning of the mbuf data
+ *
+ * This function is used by drivers in their receive function, as it
+ * returns the location where data should be written by the NIC, taking
+ * the default headroom into account.
+ *
+ * @param mb
+ *   The pointer to the mbuf.
+ * @return
+ *   The physical address of the beginning of the mbuf data
+ */
+static inline phys_addr_t
+rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
+{
+	return mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+}
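
The two prefetch helpers exist because of the split-cacheline layout of struct rte_mbuf described above: RX-path fields sit in the first cache line, TX-path fields in the second. A polling loop can warm the next mbuf while handling the current one. A minimal sketch, where process_packet() is a hypothetical per-packet handler and is not part of this header:

#include <rte_mbuf.h>

void process_packet(struct rte_mbuf *m); /* hypothetical handler */

static void
handle_burst(struct rte_mbuf **pkts, uint16_t nb_rx)
{
	uint16_t i;

	for (i = 0; i < nb_rx; i++) {
		if (i + 1 < nb_rx) {
			/* warm RX fields of the next mbuf, then TX fields
			 * (the second call is a no-op on 64B cachelines
			 * larger than the first part) */
			rte_mbuf_prefetch_part1(pkts[i + 1]);
			rte_mbuf_prefetch_part2(pkts[i + 1]);
		}
		process_packet(pkts[i]);
	}
}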
 
 /**
  * Return the mbuf owning the data buffer address of an indirect mbuf.
  *
@@ -357,7 +608,7 @@ static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
 static inline struct rte_mbuf *
 rte_mbuf_from_indirect(struct rte_mbuf *mi)
 {
-	return RTE_PTR_SUB(mi->buf_addr, sizeof(*mi) + mi->priv_size);
+	return (struct rte_mbuf *)RTE_PTR_SUB(mi->buf_addr, sizeof(*mi) + mi->priv_size);
 }
 
 /**
@@ -402,29 +653,11 @@ struct rte_pktmbuf_pool_private {
 
 /** check mbuf type in debug mode */
 #define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)
 
-/** check mbuf type in debug mode if mbuf pointer is not null */
-#define __rte_mbuf_sanity_check_raw(m, is_h) do { \
-	if ((m) != NULL) \
-		rte_mbuf_sanity_check(m, is_h); \
-} while (0)
-
-/** MBUF asserts in debug mode */
-#define RTE_MBUF_ASSERT(exp) \
-if (!(exp)) { \
-	rte_panic("line%d\tassert \"" #exp "\" failed\n", __LINE__); \
-}
-
 #else /* RTE_LIBRTE_MBUF_DEBUG */
 
 /** check mbuf type in debug mode */
 #define __rte_mbuf_sanity_check(m, is_h) do { } while (0)
 
-/** check mbuf type in debug mode if mbuf pointer is not null */
-#define __rte_mbuf_sanity_check_raw(m, is_h) do { } while (0)
-
-/** MBUF asserts in debug mode */
-#define RTE_MBUF_ASSERT(exp) do { } while (0)
-
 #endif /* RTE_LIBRTE_MBUF_DEBUG */
 
 #ifdef RTE_MBUF_REFCNT_ATOMIC
 
@@ -537,9 +770,17 @@ void rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);
 
 /**
- * @internal Allocate a new mbuf from mempool *mp*.
- * The use of that function is reserved for RTE internal needs.
- * Please use rte_pktmbuf_alloc().
+ * Allocate an uninitialized mbuf from mempool *mp*.
+ *
+ * This function can be used by PMDs (especially in RX functions) to
+ * allocate an uninitialized mbuf. The driver is responsible for
+ * initializing all the required fields. See rte_pktmbuf_reset().
+ * For standard needs, prefer rte_pktmbuf_alloc().
+ *
+ * The caller can expect that the following fields of the mbuf structure
+ * are initialized: buf_addr, buf_physaddr, buf_len, refcnt=1, nb_segs=1,
+ * next=NULL, pool, priv_size. The other fields must be initialized
+ * by the caller.
  *
  * @param mp
  *   The mempool from which mbuf is allocated.
@@ -547,33 +788,55 @@ rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);
  *   - The pointer to the new mbuf on success.
  *   - NULL if allocation failed.
  */
-static inline struct rte_mbuf *__rte_mbuf_raw_alloc(struct rte_mempool *mp)
+static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
 {
 	struct rte_mbuf *m;
 	void *mb = NULL;
+
 	if (rte_mempool_get(mp, &mb) < 0)
 		return NULL;
 	m = (struct rte_mbuf *)mb;
-	RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 0);
-	rte_mbuf_refcnt_set(m, 1);
+	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);
+	RTE_ASSERT(m->next == NULL);
+	RTE_ASSERT(m->nb_segs == 1);
+	__rte_mbuf_sanity_check(m, 0);
+
	return m;
 }
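
For instance, a PMD RX-rearm path can rely on exactly the fields listed above and initialize the rest itself. A sketch under stated assumptions — struct my_rx_desc stands in for a device-specific descriptor and is hypothetical:

#include <rte_mbuf.h>

struct my_rx_desc { uint64_t pkt_addr; }; /* hypothetical descriptor */

static struct rte_mbuf *
my_rxq_rearm_one(struct rte_mempool *mp, struct my_rx_desc *desc)
{
	struct rte_mbuf *m = rte_mbuf_raw_alloc(mp);

	if (m == NULL)
		return NULL;
	/* buf_addr/buf_physaddr/refcnt/nb_segs/next are already valid;
	 * the driver fills in what it needs before handing the buffer
	 * to the NIC. */
	m->data_off = RTE_PKTMBUF_HEADROOM;
	desc->pkt_addr = rte_mbuf_data_dma_addr_default(m);
	return m; /* kept in the driver's ring until the NIC fills it */
}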
 
 /**
- * @internal Put mbuf back into its original mempool.
- * The use of that function is reserved for RTE internal needs.
- * Please use rte_pktmbuf_free().
+ * Put mbuf back into its original mempool.
+ *
+ * The caller must ensure that the mbuf is direct and properly
+ * reinitialized (refcnt=1, next=NULL, nb_segs=1), as done by
+ * rte_pktmbuf_prefree_seg().
+ *
+ * This function should be used with care, when optimization is
+ * required. For standard needs, prefer rte_pktmbuf_free() or
+ * rte_pktmbuf_free_seg().
  *
  * @param m
  *   The mbuf to be freed.
  */
 static inline void __attribute__((always_inline))
-__rte_mbuf_raw_free(struct rte_mbuf *m)
+rte_mbuf_raw_free(struct rte_mbuf *m)
 {
-	RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 0);
+	RTE_ASSERT(RTE_MBUF_DIRECT(m));
+	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);
+	RTE_ASSERT(m->next == NULL);
+	RTE_ASSERT(m->nb_segs == 1);
+	__rte_mbuf_sanity_check(m, 0);
+
 	rte_mempool_put(m->pool, m);
 }
 
+/* compat with older versions */
+__rte_deprecated
+static inline void
+__rte_mbuf_raw_free(struct rte_mbuf *m)
+{
+	rte_mbuf_raw_free(m);
+}
+
 /* Operations on ctrl mbuf */
 
 /**
@@ -582,14 +845,14 @@ __rte_mbuf_raw_free(struct rte_mbuf *m)
  * This function initializes some fields in an mbuf structure that are
  * not modified by the user once created (mbuf type, origin pool, buffer
  * start address, and so on). This function is given as a callback function
- * to rte_mempool_create() at pool creation time.
+ * to rte_mempool_obj_iter() or rte_mempool_create() at pool creation time.
  *
  * @param mp
  *   The mempool from which the mbuf is allocated.
  * @param opaque_arg
  *   A pointer that can be used by the user to retrieve useful information
- *   for mbuf initialization. This pointer comes from the ``init_arg``
- *   parameter of rte_mempool_create().
+ *   for mbuf initialization. This pointer is the opaque argument passed to
+ *   rte_mempool_obj_iter() or rte_mempool_create().
  * @param m
  *   The mbuf to initialize.
  * @param i
@@ -663,14 +926,14 @@ rte_is_ctrlmbuf(struct rte_mbuf *m)
  * This function initializes some fields in the mbuf structure that are
  * not modified by the user once created (origin pool, buffer start
  * address, and so on). This function is given as a callback function to
- * rte_mempool_create() at pool creation time.
+ * rte_mempool_obj_iter() or rte_mempool_create() at pool creation time.
  *
  * @param mp
  *   The mempool from which mbufs originate.
  * @param opaque_arg
  *   A pointer that can be used by the user to retrieve useful information
- *   for mbuf initialization. This pointer comes from the ``init_arg``
- *   parameter of rte_mempool_create().
+ *   for mbuf initialization. This pointer is the opaque argument passed to
+ *   rte_mempool_obj_iter() or rte_mempool_create().
  * @param m
  *   The mbuf to initialize.
  * @param i
@@ -685,7 +948,8 @@ void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
  *
  * This function initializes the mempool private data in the case of a
  * pktmbuf pool. This private data is needed by the driver. The
- * function is given as a callback function to rte_mempool_create() at
+ * function must be called on the mempool before it is used, or it
+ * can be given as a callback function to rte_mempool_create() at
  * pool creation. It can be extended by the user, for example, to
  * provide another packet size.
  *
@@ -693,8 +957,8 @@ void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
  *   The mempool from which mbufs originate.
  * @param opaque_arg
  *   A pointer that can be used by the user to retrieve useful information
- *   for mbuf initialization. This pointer comes from the ``init_arg``
- *   parameter of rte_mempool_create().
+ *   for mbuf initialization. This pointer is the opaque argument passed to
+ *   rte_mempool_create().
  */
 void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);
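
These two callbacks are what rte_pktmbuf_pool_create() wires up internally, and they are the "more control" path mentioned at the top of this file. A minimal sketch of building a pool directly with rte_mempool_create(); sizes are illustrative, and passing NULL to rte_pktmbuf_pool_init() requests the default data room with no per-mbuf private area:

#include <rte_mbuf.h>
#include <rte_lcore.h>

static struct rte_mempool *
create_pktmbuf_pool_manually(void)
{
	const unsigned int data_room = RTE_PKTMBUF_HEADROOM + 2048;

	return rte_mempool_create("example_pool", 8192,
		sizeof(struct rte_mbuf) + data_room, /* element size */
		32,                                  /* per-lcore cache */
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,         /* pool constructor */
		rte_pktmbuf_init, NULL,              /* per-mbuf constructor */
		rte_socket_id(), 0);
}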
@@ -702,8 +966,7 @@ void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);
 /**
  * Create a mbuf pool.
  *
  * This function creates and initializes a packet mbuf pool. It is
- * a wrapper to rte_mempool_create() with the proper packet constructor
- * and mempool constructor.
+ * a wrapper to rte_mempool functions.
  *
  * @param name
  *   The name of the mbuf pool.
@@ -716,7 +979,7 @@ void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);
  *   details.
  * @param priv_size
  *   Size of the application private area between the rte_mbuf structure
- *   and the data buffer.
+ *   and the data buffer. This value must be aligned to RTE_MBUF_PRIV_ALIGN.
  * @param data_room_size
  *   Size of data buffer in each mbuf, including RTE_PKTMBUF_HEADROOM.
  * @param socket_id
@@ -728,7 +991,7 @@ void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);
  *   with rte_errno set appropriately. Possible rte_errno values include:
  *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
  *    - E_RTE_SECONDARY - function was called from a secondary process instance
- *    - EINVAL - cache size provided is too large
+ *    - EINVAL - cache size provided is too large, or priv_size is not aligned.
  *    - ENOSPC - the maximum number of memzones has already been allocated
  *    - EEXIST - a memzone with the same name already exists
  *    - ENOMEM - no appropriate memory area found in which to create memzone
@@ -779,6 +1042,19 @@ rte_pktmbuf_priv_size(struct rte_mempool *mp)
 	return mbp_priv->mbuf_priv_size;
 }
 
+/**
+ * Reset the data_off field of a packet mbuf to its default value.
+ *
+ * The given mbuf must have only one segment, which should be empty.
+ *
+ * @param m
+ *   The packet mbuf whose data_off field is to be reset.
+ */
+static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
+{
+	m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, (uint16_t)m->buf_len);
+}
+
 /**
  * Reset the fields of a packet mbuf to their default values.
  *
@@ -799,8 +1075,7 @@ static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
 	m->ol_flags = 0;
 	m->packet_type = 0;
-	m->data_off = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
-			RTE_PKTMBUF_HEADROOM : m->buf_len;
+	rte_pktmbuf_reset_headroom(m);
 
 	m->data_len = 0;
 	__rte_mbuf_sanity_check(m, 1);
@@ -822,16 +1097,73 @@ static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
 static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
 {
 	struct rte_mbuf *m;
-	if ((m = __rte_mbuf_raw_alloc(mp)) != NULL)
+	if ((m = rte_mbuf_raw_alloc(mp)) != NULL)
 		rte_pktmbuf_reset(m);
 	return m;
 }
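
A caller can rely on the reset state described above. A minimal sketch — rte_pktmbuf_headroom() and rte_pktmbuf_free() are declared elsewhere in this header, and the pool's data room is assumed to be at least RTE_PKTMBUF_HEADROOM:

#include <rte_mbuf.h>

static int
alloc_and_check(struct rte_mempool *mp)
{
	struct rte_mbuf *m = rte_pktmbuf_alloc(mp);

	if (m == NULL)
		return -1;
	/* freshly reset: one empty segment with the default headroom */
	RTE_ASSERT(rte_pktmbuf_data_len(m) == 0);
	RTE_ASSERT(rte_pktmbuf_headroom(m) == RTE_PKTMBUF_HEADROOM);
	rte_pktmbuf_free(m);
	return 0;
}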
 
+/**
+ * Allocate a bulk of mbufs, initialize refcnt and reset the fields to default
+ * values.
+ *
+ * @param pool
+ *   The mempool from which mbufs are allocated.
+ * @param mbufs
+ *   Array of pointers to mbufs
+ * @param count
+ *   Array size
+ * @return
+ *   - 0: Success
+ *   - <0: Error code of rte_mempool_get_bulk(); no mbufs are allocated.
+ */
+static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
+	 struct rte_mbuf **mbufs, unsigned count)
+{
+	unsigned idx = 0;
+	int rc;
+
+	rc = rte_mempool_get_bulk(pool, (void **)mbufs, count);
+	if (unlikely(rc))
+		return rc;
+
+	/* To understand duff's device on loop unwinding optimization, see
+	 * https://en.wikipedia.org/wiki/Duff's_device.
+	 * Here the while() loop is used rather than a do {} while() loop to
+	 * avoid an extra check if count is zero.
+	 */
+	switch (count % 4) {
+	case 0:
+		while (idx != count) {
+			RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
+			rte_mbuf_refcnt_set(mbufs[idx], 1);
+			rte_pktmbuf_reset(mbufs[idx]);
+			idx++;
+	case 3:
+			RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
+			rte_mbuf_refcnt_set(mbufs[idx], 1);
+			rte_pktmbuf_reset(mbufs[idx]);
+			idx++;
+	case 2:
+			RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
+			rte_mbuf_refcnt_set(mbufs[idx], 1);
+			rte_pktmbuf_reset(mbufs[idx]);
+			idx++;
+	case 1:
+			RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
+			rte_mbuf_refcnt_set(mbufs[idx], 1);
+			rte_pktmbuf_reset(mbufs[idx]);
+			idx++;
+		}
+	}
+	return 0;
+}
+
 /**
  * Attach packet mbuf to another packet mbuf.
  *
  * After attachment, the mbuf we attached is referred to as 'indirect',
  * while the mbuf we attached to is referred to as 'direct'.
+ * The direct mbuf's reference counter is incremented.
+ *
  * Right now, not supported:
  *  - attachment for already indirect mbuf (e.g. - mi has to be direct).
  *  - the mbuf we are trying to attach (mi) is used by someone else
@@ -846,7 +1178,7 @@ static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
 {
 	struct rte_mbuf *md;
 
-	RTE_MBUF_ASSERT(RTE_MBUF_DIRECT(mi) &&
+	RTE_ASSERT(RTE_MBUF_DIRECT(mi) &&
 	    rte_mbuf_refcnt_read(mi) == 1);
 
 	/* if m is not direct, get the mbuf that embeds the data */
@@ -861,7 +1193,6 @@ static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
 	mi->buf_addr = m->buf_addr;
 	mi->buf_len = m->buf_len;
 
-	mi->next = m->next;
 	mi->data_off = m->data_off;
 	mi->data_len = m->data_len;
 	mi->port = m->port;
@@ -885,13 +1216,17 @@ static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
  *
  *  - restore original mbuf address and length values.
  *  - reset pktmbuf data and data_len to their default values.
- *  All other fields of the given packet mbuf will be left intact.
+ *  - decrement the direct mbuf's reference counter. When the
+ *  reference counter becomes 0, the direct mbuf is freed.
+ *
+ * All other fields of the given packet mbuf will be left intact.
 *
 * @param m
 *   The indirect attached packet mbuf.
 */
 static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
 {
+	struct rte_mbuf *md = rte_mbuf_from_indirect(m);
 	struct rte_mempool *mp = m->pool;
 	uint32_t mbuf_size, buf_len, priv_size;
 
@@ -903,33 +1238,75 @@ static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
 	m->buf_addr = (char *)m + mbuf_size;
 	m->buf_physaddr = rte_mempool_virt2phy(mp, m) + mbuf_size;
 	m->buf_len = (uint16_t)buf_len;
-	m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, (uint16_t)m->buf_len);
+	rte_pktmbuf_reset_headroom(m);
 	m->data_len = 0;
 	m->ol_flags = 0;
+
+	if (rte_mbuf_refcnt_update(md, -1) == 0) {
+		md->next = NULL;
+		md->nb_segs = 1;
+		rte_mbuf_refcnt_set(md, 1);
+		rte_mbuf_raw_free(md);
+	}
 }
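
Taken together, attach and detach give zero-copy duplication: a second, independently freeable reference to one packet's data. A minimal sketch (the header's rte_pktmbuf_clone(), not shown in this diff, wraps the same steps for a whole segment chain):

#include <rte_mbuf.h>

static struct rte_mbuf *
zero_copy_ref(struct rte_mbuf *pkt, struct rte_mempool *mp)
{
	struct rte_mbuf *mi = rte_pktmbuf_alloc(mp);

	if (mi == NULL)
		return NULL;
	rte_pktmbuf_attach(mi, pkt); /* pkt's refcnt is incremented */
	/* freeing mi later detaches it and drops pkt's refcnt again */
	return mi;
}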
 
-static inline struct rte_mbuf* __attribute__((always_inline))
-__rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
+/**
+ * Decrease reference counter and unlink a mbuf segment
+ *
+ * This function does the same as a free, except that it does not
+ * return the segment to its pool.
+ * It decreases the reference counter; if the counter reaches 0, an
+ * indirect mbuf is also detached from its parent.
+ *
+ * @param m
+ *   The mbuf to be unlinked
+ * @return
+ *   - (m) if it is the last reference. It can be recycled or freed.
+ *   - (NULL) if the mbuf still has remaining references on it.
+ */
+__attribute__((always_inline))
+static inline struct rte_mbuf *
+rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
 {
 	__rte_mbuf_sanity_check(m, 0);
 
-	if (likely(rte_mbuf_refcnt_update(m, -1) == 0)) {
+	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
+
+		if (RTE_MBUF_INDIRECT(m))
+			rte_pktmbuf_detach(m);
+
+		if (m->next != NULL) {
+			m->next = NULL;
+			m->nb_segs = 1;
+		}
+
+		return m;
+
+	} else if (rte_atomic16_add_return(&m->refcnt_atomic, -1) == 0) {
 
-		/* if this is an indirect mbuf, then
-		 *  - detach mbuf
-		 *  - free attached mbuf segment
-		 */
-		if (RTE_MBUF_INDIRECT(m)) {
-			struct rte_mbuf *md = rte_mbuf_from_indirect(m);
+
+		if (RTE_MBUF_INDIRECT(m))
 			rte_pktmbuf_detach(m);
-			if (rte_mbuf_refcnt_update(md, -1) == 0)
-				__rte_mbuf_raw_free(md);
+
+		if (m->next != NULL) {
+			m->next = NULL;
+			m->nb_segs = 1;
 		}
+		rte_mbuf_refcnt_set(m, 1);
+
 		return m;
 	}
 	return NULL;
 }
 
+/* deprecated, replaced by rte_pktmbuf_prefree_seg() */
+__rte_deprecated
+static inline struct rte_mbuf *
+__rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
+{
+	return rte_pktmbuf_prefree_seg(m);
+}
+
 /**
  * Free a segment of a packet mbuf into its original mempool.
  *
@@ -942,10 +1319,9 @@ __rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
 static inline void __attribute__((always_inline))
 rte_pktmbuf_free_seg(struct rte_mbuf *m)
 {
-	if (likely(NULL != (m = __rte_pktmbuf_prefree_seg(m)))) {
-		m->next = NULL;
-		__rte_mbuf_raw_free(m);
-	}
+	m = rte_pktmbuf_prefree_seg(m);
+	if (likely(m != NULL))
+		rte_mbuf_raw_free(m);
 }
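
The (m)/(NULL) contract above is what lets a TX-completion path recycle segments without a full free/alloc round-trip. A sketch of that pattern — txq_cache is a hypothetical per-queue array and the caller is assumed to guarantee capacity:

#include <rte_mbuf.h>

static void
tx_completion(struct rte_mbuf *pkt, struct rte_mbuf **txq_cache, unsigned *n)
{
	struct rte_mbuf *m = rte_pktmbuf_prefree_seg(pkt);

	if (m != NULL) /* last reference: already reinitialized */
		txq_cache[(*n)++] = m; /* reuse instead of rte_mbuf_raw_free(m) */
}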
@@ -1122,6 +1498,27 @@ static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
  */
 #define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)
 
+/**
+ * A macro that returns the physical address that points to an offset of the
+ * start of the data in the mbuf
+ *
+ * @param m
+ *   The packet mbuf.
+ * @param o
+ *   The offset into the data to calculate address from.
+ */
+#define rte_pktmbuf_mtophys_offset(m, o) \
+	(phys_addr_t)((m)->buf_physaddr + (m)->data_off + (o))
+
+/**
+ * A macro that returns the physical address that points to the start of the
+ * data in the mbuf
+ *
+ * @param m
+ *   The packet mbuf.
+ */
+#define rte_pktmbuf_mtophys(m) rte_pktmbuf_mtophys_offset(m, 0)
+
 /**
  * A macro that returns the length of the packet.
  *
@@ -1276,7 +1673,182 @@ static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
 }
 
 /**
- * Dump an mbuf structure to the console.
+ * @internal used by rte_pktmbuf_read().
+ */
+const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
+	uint32_t len, void *buf);
+
+/**
+ * Read len data bytes in a mbuf at specified offset.
+ *
+ * If the data is contiguous, return the pointer in the mbuf data, else
+ * copy the data in the buffer provided by the user and return its
+ * pointer.
+ *
+ * @param m
+ *   The pointer to the mbuf.
+ * @param off
+ *   The offset of the data in the mbuf.
+ * @param len
+ *   The number of bytes to read.
+ * @param buf
+ *   The buffer where data is copied if it is not contiguous in mbuf
+ *   data. Its length should be at least equal to the len parameter.
+ * @return
+ *   The pointer to the data, either in the mbuf if it is contiguous,
+ *   or in the user buffer. If mbuf is too small, NULL is returned.
+ */
+static inline const void *rte_pktmbuf_read(const struct rte_mbuf *m,
+	uint32_t off, uint32_t len, void *buf)
+{
+	if (likely(off + len <= rte_pktmbuf_data_len(m)))
+		return rte_pktmbuf_mtod_offset(m, char *, off);
+	else
+		return __rte_pktmbuf_read(m, off, len, buf);
+}
+
+/**
+ * Chain an mbuf to another, thereby creating a segmented packet.
+ *
+ * Note: The implementation will do a linear walk over the segments to find
+ * the tail entry. For cases when there are many segments, it's better to
+ * chain the entries manually.
+ *
+ * @param head
+ *   The head of the mbuf chain (the first packet)
+ * @param tail
+ *   The mbuf to put last in the chain
+ *
+ * @return
+ *   - 0, on success.
+ *   - -EOVERFLOW, if the chain is full (65535 entries)
+ */
+static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
+{
+	struct rte_mbuf *cur_tail;
+
+	/* Check for number-of-segments-overflow */
+	if (head->nb_segs + tail->nb_segs >= 1 << (sizeof(head->nb_segs) * 8))
+		return -EOVERFLOW;
+
+	/* Chain 'tail' onto the old tail */
+	cur_tail = rte_pktmbuf_lastseg(head);
+	cur_tail->next = tail;
+
+	/* accumulate number of segments and total length. */
+	head->nb_segs = (uint16_t)(head->nb_segs + tail->nb_segs);
+	head->pkt_len += tail->pkt_len;
+
+	/* pkt_len is only set in the head */
+	tail->pkt_len = tail->data_len;
+
+	return 0;
+}
+
+/**
+ * Validate general requirements for Tx offload in mbuf.
+ *
+ * This function checks correctness and completeness of Tx offload settings.
+ *
+ * @param m
+ *   The packet mbuf to be validated.
+ * @return
+ *   0 if packet is valid
+ */
+static inline int
+rte_validate_tx_offload(const struct rte_mbuf *m)
+{
+	uint64_t ol_flags = m->ol_flags;
+	uint64_t inner_l3_offset = m->l2_len;
+
+	/* Does packet set any of available offloads? */
+	if (!(ol_flags & PKT_TX_OFFLOAD_MASK))
+		return 0;
+
+	if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
+		inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
+
+	/* Headers are fragmented */
+	if (rte_pktmbuf_data_len(m) < inner_l3_offset + m->l3_len + m->l4_len)
+		return -ENOTSUP;
+
+	/* IP checksum can be counted only for IPv4 packet */
+	if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6))
+		return -EINVAL;
+
+	/* IP type not set when required */
+	if (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG))
+		if (!(ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)))
+			return -EINVAL;
+
+	/* Check requirements for TSO packet */
+	if (ol_flags & PKT_TX_TCP_SEG)
+		if ((m->tso_segsz == 0) ||
+				((ol_flags & PKT_TX_IPV4) &&
+				!(ol_flags & PKT_TX_IP_CKSUM)))
+			return -EINVAL;
+
+	/* PKT_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. */
+	if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
+			!(ol_flags & PKT_TX_OUTER_IPV4))
+		return -EINVAL;
+
+	return 0;
+}
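
As a concrete example of settings that satisfy these checks, an IPv4/TCP TSO request could be prepared as below. Header lengths are illustrative (no VLAN, no IP/TCP options); PKT_TX_TCP_CKSUM belongs to PKT_TX_L4_MASK and is defined elsewhere in this header:

#include <rte_mbuf.h>

static void
request_ipv4_tso(struct rte_mbuf *m, uint16_t mss)
{
	m->l2_len = 14;        /* Ethernet header, no VLAN */
	m->l3_len = 20;        /* IPv4 header, no options */
	m->l4_len = 20;        /* TCP header, no options */
	m->tso_segsz = mss;    /* must be non-zero for PKT_TX_TCP_SEG */
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
		       PKT_TX_TCP_CKSUM | PKT_TX_TCP_SEG;
}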
+
+/**
+ * Linearize data in mbuf.
+ *
+ * This function moves the mbuf data into the first segment if there is enough
+ * tailroom. The subsequent segments are unchained and freed.
+ *
+ * @param mbuf
+ *   mbuf to linearize
+ * @return
+ *   - 0, on success
+ *   - -1, on error
+ */
+static inline int
+rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
+{
+	int seg_len, copy_len;
+	struct rte_mbuf *m;
+	struct rte_mbuf *m_next;
+	char *buffer;
+
+	if (rte_pktmbuf_is_contiguous(mbuf))
+		return 0;
+
+	/* Extend first segment to the total packet length */
+	copy_len = rte_pktmbuf_pkt_len(mbuf) - rte_pktmbuf_data_len(mbuf);
+
+	if (unlikely(copy_len > rte_pktmbuf_tailroom(mbuf)))
+		return -1;
+
+	buffer = rte_pktmbuf_mtod_offset(mbuf, char *, mbuf->data_len);
+	mbuf->data_len = (uint16_t)(mbuf->pkt_len);
+
+	/* Append data from next segments to the first one */
+	m = mbuf->next;
+	while (m != NULL) {
+		m_next = m->next;
+
+		seg_len = rte_pktmbuf_data_len(m);
+		rte_memcpy(buffer, rte_pktmbuf_mtod(m, char *), seg_len);
+		buffer += seg_len;
+
+		rte_pktmbuf_free_seg(m);
+		m = m_next;
+	}
+
+	mbuf->next = NULL;
+	mbuf->nb_segs = 1;
+
+	return 0;
+}
+
+/**
+ * Dump an mbuf structure to a file.
  *
  * Dump all fields for the given packet mbuf and all its associated
  * segments (in the case of a chained buffer).