X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_mbuf%2Frte_mbuf.h;h=11fa06d7542fceead770aa177e5be08f85606707;hb=62dac65a326557939352f7ef62ac982a3a9f5a4a;hp=115c560120fbd346c53e80afeaace4dfe3e4875d;hpb=de6f5034f5c6e7a52ee24bc774beeb84d66d4d89;p=dpdk.git

diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 115c560120..11fa06d754 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -65,19 +65,16 @@
 extern "C" {
 #endif

-/* deprecated options */
-#pragma GCC poison RTE_MBUF_SCATTER_GATHER
-#pragma GCC poison RTE_MBUF_REFCNT
-
 /*
  * Packet Offload Features Flags. It also carries packet type information.
  * Critical resources. Both rx/tx share these bits. Be cautious on any change.
  *
  * - RX flags start at bit position zero, and get added to the left of previous
  *   flags.
- * - The most-significant 8 bits are reserved for generic mbuf flags
- * - TX flags therefore start at bit position 55 (i.e. 63-8), and new flags get
- *   added to the right of the previously defined flags
+ * - The most-significant 3 bits are reserved for generic mbuf flags
+ * - TX flags therefore start at bit position 60 (i.e. 63-3), and new flags get
+ *   added to the right of the previously defined flags, i.e. they should count
+ *   downwards, not upwards.
  *
  * Keep these flags synchronized with rte_get_rx_ol_flag_name() and
  * rte_get_tx_ol_flag_name().
@@ -87,23 +84,13 @@ extern "C" {
 #define PKT_RX_FDIR          (1ULL << 2)  /**< RX packet with FDIR match indicated. */
 #define PKT_RX_L4_CKSUM_BAD  (1ULL << 3)  /**< L4 cksum of RX pkt. is not OK. */
 #define PKT_RX_IP_CKSUM_BAD  (1ULL << 4)  /**< IP cksum of RX pkt. is not OK. */
-#define PKT_RX_EIP_CKSUM_BAD (0ULL << 0)  /**< External IP header checksum error. */
+#define PKT_RX_EIP_CKSUM_BAD (1ULL << 5)  /**< External IP header checksum error. */
 #define PKT_RX_OVERSIZE      (0ULL << 0)  /**< Num of desc of an RX pkt oversize. */
 #define PKT_RX_HBUF_OVERFLOW (0ULL << 0)  /**< Header buffer overflow. */
 #define PKT_RX_RECIP_ERR     (0ULL << 0)  /**< Hardware processing error. */
 #define PKT_RX_MAC_ERR       (0ULL << 0)  /**< MAC error. */
-#ifndef RTE_NEXT_ABI
-#define PKT_RX_IPV4_HDR      (1ULL << 5)  /**< RX packet with IPv4 header. */
-#define PKT_RX_IPV4_HDR_EXT  (1ULL << 6)  /**< RX packet with extended IPv4 header. */
-#define PKT_RX_IPV6_HDR      (1ULL << 7)  /**< RX packet with IPv6 header. */
-#define PKT_RX_IPV6_HDR_EXT  (1ULL << 8)  /**< RX packet with extended IPv6 header. */
-#endif /* RTE_NEXT_ABI */
 #define PKT_RX_IEEE1588_PTP  (1ULL << 9)  /**< RX IEEE1588 L2 Ethernet PT Packet. */
 #define PKT_RX_IEEE1588_TMST (1ULL << 10) /**< RX IEEE1588 L2/L4 timestamped packet.*/
-#ifndef RTE_NEXT_ABI
-#define PKT_RX_TUNNEL_IPV4_HDR (1ULL << 11) /**< RX tunnel packet with IPv4 header.*/
-#define PKT_RX_TUNNEL_IPV6_HDR (1ULL << 12) /**< RX tunnel packet with IPv6 header. */
-#endif /* RTE_NEXT_ABI */
 #define PKT_RX_FDIR_ID       (1ULL << 13) /**< FD id reported if FDIR match. */
 #define PKT_RX_FDIR_FLX      (1ULL << 14) /**< Flexible bytes reported if FDIR match. */
 #define PKT_RX_QINQ_PKT      (1ULL << 15) /**< RX packet with double VLAN stripped. */
@@ -201,12 +188,13 @@
  */
 #define PKT_TX_OUTER_IPV6    (1ULL << 60)

+#define __RESERVED           (1ULL << 61) /**< reserved for future mbuf use */
+
 #define IND_ATTACHED_MBUF    (1ULL << 62) /**< Indirect attached mbuf */

 /* Use final bit of flags to indicate a control mbuf */
 #define CTRL_MBUF_FLAG       (1ULL << 63) /**< Mbuf contains control data */
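To make the flag layout concrete, here is a short usage sketch (illustrative only, not part of the patch). It assumes a received mbuf m, and struct ether_hdr / struct ipv4_hdr from rte_ether.h and rte_ip.h:

        /* RX side: the PMD reports checksum status in ol_flags */
        if (m->ol_flags & (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD)) {
                rte_pktmbuf_free(m);    /* drop packets with bad checksums */
                return;
        }

        /* TX side: ask the NIC to compute the IPv4 header checksum */
        m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM;
        m->l2_len = sizeof(struct ether_hdr);
        m->l3_len = sizeof(struct ipv4_hdr);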
-#ifdef RTE_NEXT_ABI
 /*
  * 32 bits are divided into several fields to mark packet types. Note that
  * each field is indexical.
@@ -595,7 +583,7 @@ extern "C" {
 /**
  * Mask of inner layer 3 packet types.
  */
-#define RTE_PTYPE_INNER_INNER_L3_MASK       0x00f00000
+#define RTE_PTYPE_INNER_L3_MASK             0x00f00000
 /**
  * TCP (Transmission Control Protocol) packet type.
  * It is used for inner packet only.
@@ -677,20 +665,25 @@ extern "C" {
 /**
  * Check if the (outer) L3 header is IPv4. To avoid comparing IPv4 types one by
  * one, bit 4 is selected to be used for IPv4 only. Then checking bit 4 can
- * determin if it is an IPV4 packet.
+ * determine if it is an IPV4 packet.
  */
 #define  RTE_ETH_IS_IPV4_HDR(ptype) ((ptype) & RTE_PTYPE_L3_IPV4)

 /**
  * Check if the (outer) L3 header is IPv6. To avoid comparing IPv6 types one by
  * one, bit 6 is selected to be used for IPv6 only. Then checking bit 6 can
- * determin if it is an IPV4 packet.
+ * determine if it is an IPV6 packet.
  */
 #define  RTE_ETH_IS_IPV6_HDR(ptype) ((ptype) & RTE_PTYPE_L3_IPV6)

 /* Check if it is a tunneling packet */
-#define RTE_ETH_IS_TUNNEL_PKT(ptype) ((ptype) & RTE_PTYPE_TUNNEL_MASK)
-#endif /* RTE_NEXT_ABI */
+#define RTE_ETH_IS_TUNNEL_PKT(ptype) ((ptype) & (RTE_PTYPE_TUNNEL_MASK | \
+						 RTE_PTYPE_INNER_L2_MASK | \
+						 RTE_PTYPE_INNER_L3_MASK | \
+						 RTE_PTYPE_INNER_L4_MASK))
+
+/** Alignment constraint of mbuf private area. */
+#define RTE_MBUF_PRIV_ALIGN 8

 /**
  * Get the name of a RX offload flag
@@ -766,7 +759,6 @@ struct rte_mbuf {
 	/* remaining bytes are set on RX when pulling packet from descriptor */
 	MARKER rx_descriptor_fields1;

-#ifdef RTE_NEXT_ABI
 	/*
 	 * The packet type, which is the combination of outer/inner L2, L3, L4
 	 * and tunnel types.
@@ -787,19 +779,7 @@ struct rte_mbuf {
 	uint32_t pkt_len;         /**< Total pkt len: sum of all segments. */
 	uint16_t data_len;        /**< Amount of data in segment buffer. */
 	uint16_t vlan_tci;        /**< VLAN Tag Control Identifier (CPU order) */
-#else /* RTE_NEXT_ABI */
-	/**
-	 * The packet type, which is used to indicate ordinary packet and also
-	 * tunneled packet format, i.e. each number is represented a type of
-	 * packet.
-	 */
-	uint16_t packet_type;

-	uint16_t data_len;        /**< Amount of data in segment buffer. */
-	uint32_t pkt_len;         /**< Total pkt len: sum of all segments. */
-	uint16_t vlan_tci;        /**< VLAN Tag Control Identifier (CPU order) */
-	uint16_t vlan_tci_outer;  /**< Outer VLAN Tag Control Identifier (CPU order) */
-#endif /* RTE_NEXT_ABI */
 	union {
 		uint32_t rss;     /**< RSS hash result if RSS enabled */
 		struct {
@@ -815,17 +795,19 @@ struct rte_mbuf {
 			/**< First 4 flexible bytes or FD ID, dependent on
 			     PKT_RX_FDIR_* flag in ol_flags. */
 		} fdir;           /**< Filter identifier if FDIR enabled */
-		uint32_t sched;   /**< Hierarchical scheduler */
+		struct {
+			uint32_t lo;
+			uint32_t hi;
+		} sched;          /**< Hierarchical scheduler */
 		uint32_t usr;	  /**< User defined tags. See rte_distributor_process() */
 	} hash;                   /**< hash information */

 	uint32_t seqn; /**< Sequence number. See also rte_reorder_insert() */
-#ifdef RTE_NEXT_ABI
+
 	uint16_t vlan_tci_outer;  /**< Outer VLAN Tag Control Identifier (CPU order) */
-#endif /* RTE_NEXT_ABI */

 	/* second cache line - fields only used in slow path or on TX */
-	MARKER cacheline1 __rte_cache_aligned;
+	MARKER cacheline1 __rte_cache_min_aligned;

 	union {
 		void *userdata;   /**< Can be used for external metadata */
@@ -860,8 +842,78 @@ struct rte_mbuf {
 	uint16_t timesync;
 } __rte_cache_aligned;
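As a consumption sketch for the unified packet_type field (illustrative, not from the patch; the stats counters are hypothetical, the masks are the RTE_PTYPE_* constants above):

        uint32_t ptype = m->packet_type;

        if (RTE_ETH_IS_IPV4_HDR(ptype))
                stats->ipv4++;          /* hypothetical per-type counters */
        else if (RTE_ETH_IS_IPV6_HDR(ptype))
                stats->ipv6++;

        /* inner layers of tunnel packets live in the RTE_PTYPE_INNER_* fields */
        if (RTE_ETH_IS_TUNNEL_PKT(ptype) &&
            (ptype & RTE_PTYPE_INNER_L3_MASK) == RTE_PTYPE_INNER_L3_IPV4)
                stats->tunnel_inner_ipv4++;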
+/**
+ * Prefetch the first part of the mbuf
+ *
+ * The first 64 bytes of the mbuf correspond to fields that are used early
+ * in the receive path. If the cache line of the architecture is larger than
+ * 64B, the second part will also be prefetched.
+ *
+ * @param m
+ *   The pointer to the mbuf.
+ */
+static inline void
+rte_mbuf_prefetch_part1(struct rte_mbuf *m)
+{
+	rte_prefetch0(&m->cacheline0);
+}
+
+/**
+ * Prefetch the second part of the mbuf
+ *
+ * The next 64 bytes of the mbuf correspond to fields that are used in the
+ * transmit path. If the cache line of the architecture is larger than 64B,
+ * this function does nothing as it is expected that the full mbuf is
+ * already in cache.
+ *
+ * @param m
+ *   The pointer to the mbuf.
+ */
+static inline void
+rte_mbuf_prefetch_part2(struct rte_mbuf *m)
+{
+#if RTE_CACHE_LINE_SIZE == 64
+	rte_prefetch0(&m->cacheline1);
+#else
+	RTE_SET_USED(m);
+#endif
+}
+
+
 static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);

+/**
+ * Return the DMA address of the beginning of the mbuf data
+ *
+ * @param mb
+ *   The pointer to the mbuf.
+ * @return
+ *   The physical address of the beginning of the mbuf data
+ */
+static inline phys_addr_t
+rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
+{
+	return mb->buf_physaddr + mb->data_off;
+}
+
+/**
+ * Return the default DMA address of the beginning of the mbuf data
+ *
+ * This function is used by drivers in their receive function, as it
+ * returns the location where data should be written by the NIC, taking
+ * the default headroom into account.
+ *
+ * @param mb
+ *   The pointer to the mbuf.
+ * @return
+ *   The physical address of the beginning of the mbuf data
+ */
+static inline phys_addr_t
+rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
+{
+	return mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+}
+
 /**
  * Return the mbuf owning the data buffer address of an indirect mbuf.
  *
@@ -873,7 +925,7 @@ static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
 static inline struct rte_mbuf *
 rte_mbuf_from_indirect(struct rte_mbuf *mi)
 {
-	return RTE_PTR_SUB(mi->buf_addr, sizeof(*mi) + mi->priv_size);
+	return (struct rte_mbuf *)RTE_PTR_SUB(mi->buf_addr, sizeof(*mi) + mi->priv_size);
 }

 /**
@@ -918,29 +970,11 @@ struct rte_pktmbuf_pool_private {
 /** check mbuf type in debug mode */
 #define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)

-/** check mbuf type in debug mode if mbuf pointer is not null */
-#define __rte_mbuf_sanity_check_raw(m, is_h) do {       \
-	if ((m) != NULL)                                 \
-		rte_mbuf_sanity_check(m, is_h);          \
-} while (0)
-
-/** MBUF asserts in debug mode */
-#define RTE_MBUF_ASSERT(exp)                                         \
-if (!(exp)) {                                                        \
-	rte_panic("line%d\tassert \"" #exp "\" failed\n", __LINE__); \
-}
-
 #else /* RTE_LIBRTE_MBUF_DEBUG */

 /** check mbuf type in debug mode */
 #define __rte_mbuf_sanity_check(m, is_h) do { } while (0)

-/** check mbuf type in debug mode if mbuf pointer is not null */
-#define __rte_mbuf_sanity_check_raw(m, is_h) do { } while (0)
-
-/** MBUF asserts in debug mode */
-#define RTE_MBUF_ASSERT(exp) do { } while (0)
-
 #endif /* RTE_LIBRTE_MBUF_DEBUG */

 #ifdef RTE_MBUF_REFCNT_ATOMIC
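For orientation, this is roughly how a driver's receive path would combine the helpers above (a hypothetical fragment: the rxq descriptor ring, its addr field and the index handling are invented for illustration; rte_mbuf_raw_alloc() is defined further below):

        /* refill: tell the NIC where to DMA the next packet's data */
        struct rte_mbuf *nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
        if (nmb != NULL)
                rxq->desc[i].addr = rte_mbuf_data_dma_addr_default(nmb);

        /* hot loop: warm the first cache line of the next received mbuf */
        rte_mbuf_prefetch_part1(rx_pkts[i + 1]);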
@@ -1053,9 +1087,12 @@ void
 rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);

 /**
- * @internal Allocate a new mbuf from mempool *mp*.
- * The use of that function is reserved for RTE internal needs.
- * Please use rte_pktmbuf_alloc().
+ * Allocate an uninitialized mbuf from mempool *mp*.
+ *
+ * This function can be used by PMDs (especially in RX functions) to
+ * allocate an uninitialized mbuf. The driver is responsible for
+ * initializing all the required fields. See rte_pktmbuf_reset().
+ * For standard needs, prefer rte_pktmbuf_alloc().
  *
  * @param mp
  *   The mempool from which mbuf is allocated.
@@ -1063,18 +1100,28 @@ rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);
  *   - The pointer to the new mbuf on success.
  *   - NULL if allocation failed.
  */
-static inline struct rte_mbuf *__rte_mbuf_raw_alloc(struct rte_mempool *mp)
+static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
 {
 	struct rte_mbuf *m;
 	void *mb = NULL;
+
 	if (rte_mempool_get(mp, &mb) < 0)
 		return NULL;
 	m = (struct rte_mbuf *)mb;
-	RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 0);
+	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 0);
 	rte_mbuf_refcnt_set(m, 1);
+	__rte_mbuf_sanity_check(m, 0);
+
 	return m;
 }

+/* compat with older versions */
+__rte_deprecated static inline struct rte_mbuf *
+__rte_mbuf_raw_alloc(struct rte_mempool *mp)
+{
+	return rte_mbuf_raw_alloc(mp);
+}
+
 /**
  * @internal Put mbuf back into its original mempool.
  * The use of that function is reserved for RTE internal needs.
@@ -1086,7 +1133,7 @@ static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
 static inline void __attribute__((always_inline))
 __rte_mbuf_raw_free(struct rte_mbuf *m)
 {
-	RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 0);
+	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 0);
 	rte_mempool_put(m->pool, m);
 }

@@ -1232,7 +1279,7 @@ void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);
  *   details.
  * @param priv_size
  *   Size of application private area between the rte_mbuf structure
- *   and the data buffer.
+ *   and the data buffer. This value must be aligned to RTE_MBUF_PRIV_ALIGN.
  * @param data_room_size
  *   Size of data buffer in each mbuf, including RTE_PKTMBUF_HEADROOM.
  * @param socket_id
@@ -1244,7 +1291,7 @@ void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);
  *   with rte_errno set appropriately. Possible rte_errno values include:
  *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
  *    - E_RTE_SECONDARY - function was called from a secondary process instance
- *    - EINVAL - cache size provided is too large
+ *    - EINVAL - cache size provided is too large, or priv_size is not aligned.
  *    - ENOSPC - the maximum number of memzones has already been allocated
  *    - EEXIST - a memzone with the same name already exists
  *    - ENOMEM - no appropriate memory area found in which to create memzone
@@ -1338,16 +1385,73 @@ static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
 static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
 {
 	struct rte_mbuf *m;
-	if ((m = __rte_mbuf_raw_alloc(mp)) != NULL)
+	if ((m = rte_mbuf_raw_alloc(mp)) != NULL)
 		rte_pktmbuf_reset(m);
 	return m;
 }
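A minimal usage sketch (assuming a pool mp created with rte_pktmbuf_pool_create(); rte_pktmbuf_append() and rte_pktmbuf_free() are declared elsewhere in this header, memset comes from string.h):

        struct rte_mbuf *m = rte_pktmbuf_alloc(mp);
        if (m != NULL) {
                char *data = rte_pktmbuf_append(m, 64); /* grow data_len by 64 */
                if (data != NULL)
                        memset(data, 0, 64);            /* fill the payload */
                rte_pktmbuf_free(m);                    /* return it to the pool */
        }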
+ */ + switch (count % 4) { + case 0: + while (idx != count) { + RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0); + rte_mbuf_refcnt_set(mbufs[idx], 1); + rte_pktmbuf_reset(mbufs[idx]); + idx++; + case 3: + RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0); + rte_mbuf_refcnt_set(mbufs[idx], 1); + rte_pktmbuf_reset(mbufs[idx]); + idx++; + case 2: + RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0); + rte_mbuf_refcnt_set(mbufs[idx], 1); + rte_pktmbuf_reset(mbufs[idx]); + idx++; + case 1: + RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0); + rte_mbuf_refcnt_set(mbufs[idx], 1); + rte_pktmbuf_reset(mbufs[idx]); + idx++; + } + } + return 0; +} + /** * Attach packet mbuf to another packet mbuf. * * After attachment we refer the mbuf we attached as 'indirect', * while mbuf we attached to as 'direct'. + * The direct mbuf's reference counter is incremented. + * * Right now, not supported: * - attachment for already indirect mbuf (e.g. - mi has to be direct). * - mbuf we trying to attach (mi) is used by someone else @@ -1362,7 +1466,7 @@ static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m) { struct rte_mbuf *md; - RTE_MBUF_ASSERT(RTE_MBUF_DIRECT(mi) && + RTE_ASSERT(RTE_MBUF_DIRECT(mi) && rte_mbuf_refcnt_read(mi) == 1); /* if m is not direct, get the mbuf that embeds the data */ @@ -1401,13 +1505,17 @@ static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m) * * - restore original mbuf address and length values. * - reset pktmbuf data and data_len to their default values. - * All other fields of the given packet mbuf will be left intact. + * - decrement the direct mbuf's reference counter. When the + * reference counter becomes 0, the direct mbuf is freed. + * + * All other fields of the given packet mbuf will be left intact. * * @param m * The indirect attached packet mbuf. */ static inline void rte_pktmbuf_detach(struct rte_mbuf *m) { + struct rte_mbuf *md = rte_mbuf_from_indirect(m); struct rte_mempool *mp = m->pool; uint32_t mbuf_size, buf_len, priv_size; @@ -1422,6 +1530,9 @@ static inline void rte_pktmbuf_detach(struct rte_mbuf *m) m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, (uint16_t)m->buf_len); m->data_len = 0; m->ol_flags = 0; + + if (rte_mbuf_refcnt_update(md, -1) == 0) + __rte_mbuf_raw_free(md); } static inline struct rte_mbuf* __attribute__((always_inline)) @@ -1430,17 +1541,9 @@ __rte_pktmbuf_prefree_seg(struct rte_mbuf *m) __rte_mbuf_sanity_check(m, 0); if (likely(rte_mbuf_refcnt_update(m, -1) == 0)) { - - /* if this is an indirect mbuf, then - * - detach mbuf - * - free attached mbuf segment - */ - if (RTE_MBUF_INDIRECT(m)) { - struct rte_mbuf *md = rte_mbuf_from_indirect(m); + /* if this is an indirect mbuf, it is detached. */ + if (RTE_MBUF_INDIRECT(m)) rte_pktmbuf_detach(m); - if (rte_mbuf_refcnt_update(md, -1) == 0) - __rte_mbuf_raw_free(md); - } return m; } return NULL; @@ -1638,6 +1741,27 @@ static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m) */ #define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0) +/** + * A macro that returns the physical address that points to an offset of the + * start of the data in the mbuf + * + * @param m + * The packet mbuf. + * @param o + * The offset into the data to calculate address from. + */ +#define rte_pktmbuf_mtophys_offset(m, o) \ + (phys_addr_t)((m)->buf_physaddr + (m)->data_off + (o)) + +/** + * A macro that returns the physical address that points to the start of the + * data in the mbuf + * + * @param m + * The packet mbuf. 
+ */ +#define rte_pktmbuf_mtophys(m) rte_pktmbuf_mtophys_offset(m, 0) + /** * A macro that returns the length of the packet. * @@ -1791,6 +1915,44 @@ static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m) return !!(m->nb_segs == 1); } +/** + * Chain an mbuf to another, thereby creating a segmented packet. + * + * Note: The implementation will do a linear walk over the segments to find + * the tail entry. For cases when there are many segments, it's better to + * chain the entries manually. + * + * @param head + * The head of the mbuf chain (the first packet) + * @param tail + * The mbuf to put last in the chain + * + * @return + * - 0, on success. + * - -EOVERFLOW, if the chain is full (256 entries) + */ +static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail) +{ + struct rte_mbuf *cur_tail; + + /* Check for number-of-segments-overflow */ + if (head->nb_segs + tail->nb_segs >= 1 << (sizeof(head->nb_segs) * 8)) + return -EOVERFLOW; + + /* Chain 'tail' onto the old tail */ + cur_tail = rte_pktmbuf_lastseg(head); + cur_tail->next = tail; + + /* accumulate number of segments and total length. */ + head->nb_segs = (uint8_t)(head->nb_segs + tail->nb_segs); + head->pkt_len += tail->pkt_len; + + /* pkt_len is only set in the head */ + tail->pkt_len = tail->data_len; + + return 0; +} + /** * Dump an mbuf structure to the console. *