X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_mbuf%2Frte_mbuf.h;h=323a1ac1632c170344b6dec7dd693926777ef54d;hb=97cb466d65c9;hp=e15378567a11a5ec645c7f94d192ff9042fb2c93;hpb=54e9290269f048b848481995043e3ec723356b2d;p=dpdk.git

diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index e15378567a..323a1ac163 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -400,14 +400,13 @@ struct rte_mbuf {
 	void *buf_addr;           /**< Virtual address of segment buffer. */
 	phys_addr_t buf_physaddr; /**< Physical address of segment buffer. */
 
-	uint16_t buf_len;         /**< Length of segment buffer. */
-
-	/* next 6 bytes are initialised on RX descriptor rearm */
-	MARKER8 rearm_data;
+	/* next 8 bytes are initialised on RX descriptor rearm */
+	MARKER64 rearm_data;
 	uint16_t data_off;
 
 	/**
-	 * 16-bit Reference counter.
+	 * Reference counter. Its size should be at least equal to the
+	 * size of the port field (16 bits), to support zero-copy broadcast.
 	 * It should only be accessed using the following functions:
 	 * rte_mbuf_refcnt_update(), rte_mbuf_refcnt_read(), and
 	 * rte_mbuf_refcnt_set(). The functionality of these functions (atomic,
@@ -419,8 +418,10 @@ struct rte_mbuf {
 		rte_atomic16_t refcnt_atomic; /**< Atomically accessed refcnt */
 		uint16_t refcnt;              /**< Non-atomically accessed refcnt */
 	};
-	uint8_t nb_segs;          /**< Number of segments. */
-	uint8_t port;             /**< Input port. */
+	uint16_t nb_segs;         /**< Number of segments. */
+
+	/** Input port (16 bits to support more than 256 virtual ports). */
+	uint16_t port;
 
 	uint64_t ol_flags;        /**< Offload features. */
 
@@ -481,6 +482,7 @@ struct rte_mbuf {
 
 	/** Outer VLAN TCI (CPU order), valid if PKT_RX_QINQ_STRIPPED is set. */
 	uint16_t vlan_tci_outer;
+	uint16_t buf_len;         /**< Length of segment buffer. */
 
 	/* second cache line - fields only used in slow path or on TX */
 	MARKER cacheline1 __rte_cache_min_aligned;
@@ -775,6 +777,11 @@ rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);
  * initializing all the required fields. See rte_pktmbuf_reset().
  * For standard needs, prefer rte_pktmbuf_alloc().
  *
+ * The caller can expect that the following fields of the mbuf structure
+ * are initialized: buf_addr, buf_physaddr, buf_len, refcnt=1, nb_segs=1,
+ * next=NULL, pool, priv_size. The other fields must be initialized
+ * by the caller.
+ *
  * @param mp
  *   The mempool from which mbuf is allocated.
  * @return
@@ -789,28 +796,47 @@ static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
 	if (rte_mempool_get(mp, &mb) < 0)
 		return NULL;
 	m = (struct rte_mbuf *)mb;
-	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 0);
-	rte_mbuf_refcnt_set(m, 1);
+	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);
+	RTE_ASSERT(m->next == NULL);
+	RTE_ASSERT(m->nb_segs == 1);
 	__rte_mbuf_sanity_check(m, 0);
 
 	return m;
 }
 
 /**
- * @internal Put mbuf back into its original mempool.
- * The use of that function is reserved for RTE internal needs.
- * Please use rte_pktmbuf_free().
+ * Put mbuf back into its original mempool.
+ *
+ * The caller must ensure that the mbuf is direct and properly
+ * reinitialized (refcnt=1, next=NULL, nb_segs=1), as done by
+ * rte_pktmbuf_prefree_seg().
+ *
+ * This function should be used with care; use it only when
+ * optimization is required. For standard needs, prefer
+ * rte_pktmbuf_free() or rte_pktmbuf_free_seg().
 *
 * @param m
 *   The mbuf to be freed.
 */
 static inline void __attribute__((always_inline))
-__rte_mbuf_raw_free(struct rte_mbuf *m)
+rte_mbuf_raw_free(struct rte_mbuf *m)
 {
-	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 0);
+	RTE_ASSERT(RTE_MBUF_DIRECT(m));
+	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);
+	RTE_ASSERT(m->next == NULL);
+	RTE_ASSERT(m->nb_segs == 1);
+	__rte_mbuf_sanity_check(m, 0);
 	rte_mempool_put(m->pool, m);
 }
 
+/* compat with older versions */
+__rte_deprecated
+static inline void
+__rte_mbuf_raw_free(struct rte_mbuf *m)
+{
+	rte_mbuf_raw_free(m);
+}
+
 /* Operations on ctrl mbuf */
 
 /**
@@ -1216,8 +1242,12 @@ static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
 	m->data_len = 0;
 	m->ol_flags = 0;
 
-	if (rte_mbuf_refcnt_update(md, -1) == 0)
-		__rte_mbuf_raw_free(md);
+	if (rte_mbuf_refcnt_update(md, -1) == 0) {
+		md->next = NULL;
+		md->nb_segs = 1;
+		rte_mbuf_refcnt_set(md, 1);
+		rte_mbuf_raw_free(md);
+	}
 }
 
 /**
@@ -1240,10 +1270,30 @@ rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
 {
 	__rte_mbuf_sanity_check(m, 0);
 
-	if (likely(rte_mbuf_refcnt_update(m, -1) == 0)) {
-		/* if this is an indirect mbuf, it is detached. */
+	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
+
 		if (RTE_MBUF_INDIRECT(m))
 			rte_pktmbuf_detach(m);
+
+		if (m->next != NULL) {
+			m->next = NULL;
+			m->nb_segs = 1;
+		}
+
 		return m;
+
+	} else if (rte_atomic16_add_return(&m->refcnt_atomic, -1) == 0) {
+
+
+		if (RTE_MBUF_INDIRECT(m))
+			rte_pktmbuf_detach(m);
+
+		if (m->next != NULL) {
+			m->next = NULL;
+			m->nb_segs = 1;
+		}
+		rte_mbuf_refcnt_set(m, 1);
+
+		return m;
 	}
 	return NULL;
 }
@@ -1270,10 +1320,8 @@ static inline void __attribute__((always_inline))
 rte_pktmbuf_free_seg(struct rte_mbuf *m)
 {
 	m = rte_pktmbuf_prefree_seg(m);
-	if (likely(m != NULL)) {
-		m->next = NULL;
-		__rte_mbuf_raw_free(m);
-	}
+	if (likely(m != NULL))
+		rte_mbuf_raw_free(m);
 }
 
 /**
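
Note: the hunks above invert the allocator contract. Mbufs now rest in the
pool already initialized (refcnt=1, next=NULL, nb_segs=1), so
rte_mbuf_raw_alloc() merely asserts those invariants instead of setting the
reference count on every allocation. A minimal sketch of a caller honoring
the documented contract follows; the helper name rx_alloc_seg() is
hypothetical and not part of the patch:

/* Allocate one raw segment and set only the fields that
 * rte_mbuf_raw_alloc() leaves to the caller. Per the new doc comment,
 * buf_addr, buf_physaddr, buf_len, refcnt (=1), nb_segs (=1),
 * next (=NULL), pool and priv_size are already valid when the mbuf
 * leaves the pool. */
static inline struct rte_mbuf *
rx_alloc_seg(struct rte_mempool *mp)
{
	struct rte_mbuf *m = rte_mbuf_raw_alloc(mp);

	if (m == NULL)
		return NULL;

	/* Fields still owned by the caller (cf. rte_pktmbuf_reset()). */
	m->data_off = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
			RTE_PKTMBUF_HEADROOM : m->buf_len;
	m->data_len = 0;
	m->pkt_len = 0;
	m->ol_flags = 0;

	return m;
}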
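
On the free side, rte_pktmbuf_prefree_seg() now returns segments that
already satisfy the rte_mbuf_raw_free() preconditions, which is what lets
rte_pktmbuf_free_seg() shrink to a two-line wrapper. It also allows bulk
returns to the mempool in PMD TX-completion paths. The sketch below is
illustrative only: tx_free_bulk() is a hypothetical helper, and it assumes
for simplicity that every mbuf comes from the same pool.

/* Pre-free each transmitted segment and return the survivors to their
 * mempool in one bulk call. This is safe because a prefree'd mbuf is
 * fully reset (refcnt=1, next=NULL, nb_segs=1), exactly the state
 * rte_mbuf_raw_free() requires; rte_mempool_put_bulk() is the batched
 * equivalent of the rte_mempool_put() it wraps. */
static void
tx_free_bulk(struct rte_mbuf **pkts, unsigned int n)
{
	void *to_free[64];
	unsigned int nb_free = 0;
	unsigned int i;

	for (i = 0; i < n; i++) {
		struct rte_mbuf *m = rte_pktmbuf_prefree_seg(pkts[i]);

		if (m == NULL)	/* still referenced elsewhere */
			continue;

		to_free[nb_free++] = m;
		if (nb_free == RTE_DIM(to_free)) {
			rte_mempool_put_bulk(m->pool, to_free, nb_free);
			nb_free = 0;
		}
	}
	if (nb_free > 0)
		rte_mempool_put_bulk(((struct rte_mbuf *)to_free[0])->pool,
				to_free, nb_free);
}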