X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_mbuf%2Frte_mbuf.h;h=719d04dda296ece11ecc9afb4a598f566670567d;hb=de677ca0a94133088ff741fcd5d95dc25208357e;hp=7a4634fdd5dee0b1defefa8b07be0de8431279f3;hpb=455da54539870b7ed67ea40527858566a6867d02;p=dpdk.git diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h index 7a4634fdd5..719d04dda2 100644 --- a/lib/librte_mbuf/rte_mbuf.h +++ b/lib/librte_mbuf/rte_mbuf.h @@ -62,6 +62,7 @@ #include #include +#include #include #include #include @@ -208,6 +209,13 @@ extern "C" { /* add new TX flags here */ +/** + * UDP Fragmentation Offload flag. This flag is used for enabling UDP + * fragmentation in SW or in HW. When use UFO, mbuf->tso_segsz is used + * to store the MSS of UDP fragments. + */ +#define PKT_TX_UDP_SEG (1ULL << 42) + /** * Request security offload processing on the TX packet. */ @@ -236,7 +244,9 @@ extern "C" { /** * Second VLAN insertion (QinQ) flag. */ -#define PKT_TX_QINQ_PKT (1ULL << 49) /**< TX packet with double VLAN inserted. */ +#define PKT_TX_QINQ (1ULL << 49) /**< TX packet with double VLAN inserted. */ +/* this old name is deprecated */ +#define PKT_TX_QINQ_PKT PKT_TX_QINQ /** * TCP segmentation offload. To enable this offload feature for a @@ -297,7 +307,12 @@ extern "C" { */ #define PKT_TX_IPV6 (1ULL << 56) -#define PKT_TX_VLAN_PKT (1ULL << 57) /**< TX packet is a 802.1q VLAN packet. */ +/** + * TX packet is a 802.1q VLAN packet. + */ +#define PKT_TX_VLAN (1ULL << 57) +/* this old name is deprecated */ +#define PKT_TX_VLAN_PKT PKT_TX_VLAN /** * Offload the IP checksum of an external header in the hardware. The @@ -502,7 +517,7 @@ struct rte_mbuf { uint32_t pkt_len; /**< Total pkt len: sum of all segments. */ uint16_t data_len; /**< Amount of data in segment buffer. */ - /** VLAN TCI (CPU order), valid if PKT_RX_VLAN_STRIPPED is set. */ + /** VLAN TCI (CPU order), valid if PKT_RX_VLAN is set. */ uint16_t vlan_tci; union { @@ -528,7 +543,7 @@ struct rte_mbuf { uint32_t usr; /**< User defined tags. See rte_distributor_process() */ } hash; /**< hash information */ - /** Outer VLAN TCI (CPU order), valid if PKT_RX_QINQ_STRIPPED is set. */ + /** Outer VLAN TCI (CPU order), valid if PKT_RX_QINQ is set. */ uint16_t vlan_tci_outer; uint16_t buf_len; /**< Length of segment buffer. */ @@ -584,6 +599,9 @@ struct rte_mbuf { } __rte_cache_aligned; +/**< Maximum number of nb_segs allowed. */ +#define RTE_MBUF_MAX_NB_SEGS UINT16_MAX + /** * Prefetch the first part of the mbuf * @@ -625,21 +643,28 @@ rte_mbuf_prefetch_part2(struct rte_mbuf *m) static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp); /** - * Return the DMA address of the beginning of the mbuf data + * Return the IO address of the beginning of the mbuf data * * @param mb * The pointer to the mbuf. 
* @return - * The physical address of the beginning of the mbuf data + * The IO address of the beginning of the mbuf data */ +static inline rte_iova_t +rte_mbuf_data_iova(const struct rte_mbuf *mb) +{ + return mb->buf_iova + mb->data_off; +} + +__rte_deprecated static inline phys_addr_t rte_mbuf_data_dma_addr(const struct rte_mbuf *mb) { - return mb->buf_iova + mb->data_off; + return rte_mbuf_data_iova(mb); } /** - * Return the default DMA address of the beginning of the mbuf data + * Return the default IO address of the beginning of the mbuf data * * This function is used by drivers in their receive function, as it * returns the location where data should be written by the NIC, taking @@ -648,12 +673,19 @@ rte_mbuf_data_dma_addr(const struct rte_mbuf *mb) * @param mb * The pointer to the mbuf. * @return - * The physical address of the beginning of the mbuf data + * The IO address of the beginning of the mbuf data */ +static inline rte_iova_t +rte_mbuf_data_iova_default(const struct rte_mbuf *mb) +{ + return mb->buf_iova + RTE_PKTMBUF_HEADROOM; +} + +__rte_deprecated static inline phys_addr_t rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb) { - return mb->buf_iova + RTE_PKTMBUF_HEADROOM; + return rte_mbuf_data_iova_default(mb); } /** @@ -747,6 +779,13 @@ rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value) rte_atomic16_set(&m->refcnt_atomic, new_value); } +/* internal */ +static inline uint16_t +__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value) +{ + return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value)); +} + /** * Adds given value to an mbuf's refcnt and returns its new value. * @param m @@ -771,19 +810,26 @@ rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value) return 1 + value; } - return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value)); + return __rte_mbuf_refcnt_update(m, value); } #else /* ! RTE_MBUF_REFCNT_ATOMIC */ +/* internal */ +static inline uint16_t +__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value) +{ + m->refcnt = (uint16_t)(m->refcnt + value); + return m->refcnt; +} + /** * Adds given value to an mbuf's refcnt and returns its new value. */ static inline uint16_t rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value) { - m->refcnt = (uint16_t)(m->refcnt + value); - return m->refcnt; + return __rte_mbuf_refcnt_update(m, value); } /** @@ -836,10 +882,10 @@ rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header); } while (0) /** - * Allocate an unitialized mbuf from mempool *mp*. + * Allocate an uninitialized mbuf from mempool *mp*. * * This function can be used by PMDs (especially in RX functions) to - * allocate an unitialized mbuf. The driver is responsible of + * allocate an uninitialized mbuf. The driver is responsible of * initializing all the required fields. See rte_pktmbuf_reset(). * For standard needs, prefer rte_pktmbuf_alloc(). 
* @@ -857,11 +903,9 @@ rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header); static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp) { struct rte_mbuf *m; - void *mb = NULL; - if (rte_mempool_get(mp, &mb) < 0) + if (rte_mempool_get(mp, (void **)&m) < 0) return NULL; - m = (struct rte_mbuf *)mb; MBUF_RAW_ALLOC_CHECK(m); return m; } @@ -1347,8 +1391,7 @@ rte_pktmbuf_prefree_seg(struct rte_mbuf *m) return m; - } else if (rte_atomic16_add_return(&m->refcnt_atomic, -1) == 0) { - + } else if (__rte_mbuf_refcnt_update(m, -1) == 0) { if (RTE_MBUF_INDIRECT(m)) rte_pktmbuf_detach(m); @@ -1396,13 +1439,14 @@ rte_pktmbuf_free_seg(struct rte_mbuf *m) * segment is added back into its original mempool. * * @param m - * The packet mbuf to be freed. + * The packet mbuf to be freed. If NULL, the function does nothing. */ static inline void rte_pktmbuf_free(struct rte_mbuf *m) { struct rte_mbuf *m_next; - __rte_mbuf_sanity_check(m, 1); + if (m != NULL) + __rte_mbuf_sanity_check(m, 1); while (m != NULL) { m_next = m->next; @@ -1433,7 +1477,7 @@ static inline struct rte_mbuf *rte_pktmbuf_clone(struct rte_mbuf *md, { struct rte_mbuf *mc, *mi, **prev; uint32_t pktlen; - uint8_t nseg; + uint16_t nseg; if (unlikely ((mc = rte_pktmbuf_alloc(mp)) == NULL)) return NULL; @@ -1524,12 +1568,10 @@ static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m) */ static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m) { - struct rte_mbuf *m2 = (struct rte_mbuf *)m; - __rte_mbuf_sanity_check(m, 1); - while (m2->next != NULL) - m2 = m2->next; - return m2; + while (m->next != NULL) + m = m->next; + return m; } /** @@ -1564,7 +1606,7 @@ static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m) #define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0) /** - * A macro that returns the physical address that points to an offset of the + * A macro that returns the IO address that points to an offset of the * start of the data in the mbuf * * @param m @@ -1572,17 +1614,24 @@ static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m) * @param o * The offset into the data to calculate address from. */ -#define rte_pktmbuf_mtophys_offset(m, o) \ +#define rte_pktmbuf_iova_offset(m, o) \ (rte_iova_t)((m)->buf_iova + (m)->data_off + (o)) +/* deprecated */ +#define rte_pktmbuf_mtophys_offset(m, o) \ + rte_pktmbuf_iova_offset(m, o) + /** - * A macro that returns the physical address that points to the start of the + * A macro that returns the IO address that points to the start of the * data in the mbuf * * @param m * The packet mbuf. */ -#define rte_pktmbuf_mtophys(m) rte_pktmbuf_mtophys_offset(m, 0) +#define rte_pktmbuf_iova(m) rte_pktmbuf_iova_offset(m, 0) + +/* deprecated */ +#define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m) /** * A macro that returns the length of the packet. @@ -1757,7 +1806,7 @@ const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off, * @param len * The amount of bytes to read. * @param buf - * The buffer where data is copied if it is not contigous in mbuf + * The buffer where data is copied if it is not contiguous in mbuf * data. Its length should be at least equal to the len parameter. * @return * The pointer to the data, either in the mbuf if it is contiguous, @@ -1786,14 +1835,14 @@ static inline const void *rte_pktmbuf_read(const struct rte_mbuf *m, * * @return * - 0, on success. 
- * - -EOVERFLOW, if the chain is full (256 entries) + * - -EOVERFLOW, if the chain segment limit exceeded */ static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail) { struct rte_mbuf *cur_tail; /* Check for number-of-segments-overflow */ - if (head->nb_segs + tail->nb_segs >= 1 << (sizeof(head->nb_segs) * 8)) + if (head->nb_segs + tail->nb_segs > RTE_MBUF_MAX_NB_SEGS) return -EOVERFLOW; /* Chain 'tail' onto the old tail */ @@ -1801,7 +1850,7 @@ static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail cur_tail->next = tail; /* accumulate number of segments and total length. */ - head->nb_segs = (uint8_t)(head->nb_segs + tail->nb_segs); + head->nb_segs += tail->nb_segs; head->pkt_len += tail->pkt_len; /* pkt_len is only set in the head */
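The new PKT_TX_UDP_SEG flag introduced above enables UDP Fragmentation Offload, with mbuf->tso_segsz carrying the MSS of the resulting UDP fragments. A minimal sketch of how an application might mark an already-built IPv4/UDP packet for UFO follows; request_udp_fragmentation() is a hypothetical helper, and whether a given PMD also wants checksum flags set alongside PKT_TX_UDP_SEG is device-specific, so check the driver's advertised offloads.

#include <rte_mbuf.h>

/* Hypothetical helper: request UDP Fragmentation Offload for a packet
 * whose Ethernet/IPv4/UDP headers are already in place. */
static void
request_udp_fragmentation(struct rte_mbuf *m, uint16_t mss,
			  uint16_t l2_len, uint16_t l3_len)
{
	m->l2_len = l2_len;	/* e.g. 14 for plain Ethernet */
	m->l3_len = l3_len;	/* IPv4 header length */
	m->l4_len = 8;		/* UDP header length */
	m->tso_segsz = mss;	/* MSS of the UDP fragments */
	m->ol_flags |= PKT_TX_UDP_SEG | PKT_TX_IPV4 | PKT_TX_IP_CKSUM;
}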
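PKT_TX_VLAN and PKT_TX_QINQ replace the old PKT_TX_VLAN_PKT / PKT_TX_QINQ_PKT names, which remain only as deprecated aliases. Below is a sketch of VLAN and QinQ insertion offload using the new names; for QinQ the outer tag goes in vlan_tci_outer and the inner tag in vlan_tci, and some PMDs expect PKT_TX_VLAN to be set together with PKT_TX_QINQ, so treat this as indicative rather than universal. request_vlan_insertion() is a hypothetical helper.

#include <rte_mbuf.h>

static void
request_vlan_insertion(struct rte_mbuf *m, uint16_t inner_tci,
		       uint16_t outer_tci)
{
	m->vlan_tci = inner_tci;
	m->ol_flags |= PKT_TX_VLAN;		/* was PKT_TX_VLAN_PKT */

	if (outer_tci != 0) {
		m->vlan_tci_outer = outer_tci;
		m->ol_flags |= PKT_TX_QINQ;	/* was PKT_TX_QINQ_PKT */
	}
}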
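rte_mbuf_data_iova() and rte_mbuf_data_iova_default() replace the now-deprecated rte_mbuf_data_dma_addr*() helpers, and rte_pktmbuf_iova()/rte_pktmbuf_iova_offset() replace the mtophys macros. A migration sketch for driver-style code follows; fill_addresses() and the descriptor address pointers are hypothetical placeholders.

#include <rte_mbuf.h>

/* Hypothetical placeholder for filling DMA addresses in HW descriptors. */
static void
fill_addresses(struct rte_mbuf *rx_m, struct rte_mbuf *tx_m,
	       rte_iova_t *rx_addr, rte_iova_t *tx_addr, rte_iova_t *l3_addr)
{
	/* RX: address where the NIC writes, i.e. start of the headroom */
	*rx_addr = rte_mbuf_data_iova_default(rx_m);
	/* previously: rte_mbuf_data_dma_addr_default(rx_m) */

	/* TX: address where the packet data currently starts */
	*tx_addr = rte_mbuf_data_iova(tx_m);
	/* previously: rte_mbuf_data_dma_addr(tx_m) */

	/* IO address of the L3 header, via the renamed offset macro */
	*l3_addr = rte_pktmbuf_iova_offset(tx_m, tx_m->l2_len);
	/* previously: rte_pktmbuf_mtophys_offset(tx_m, tx_m->l2_len) */
}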
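rte_pktmbuf_free() now explicitly accepts a NULL mbuf and does nothing, matching free() semantics, so cleanup paths can drop their NULL checks. A small sketch; free_leftovers() is a hypothetical helper.

#include <rte_mbuf.h>

static void
free_leftovers(struct rte_mbuf **pkts, unsigned int count)
{
	unsigned int i;

	/* Entries already consumed may have been set to NULL; after this
	 * change they can be passed to rte_pktmbuf_free() directly. */
	for (i = 0; i < count; i++)
		rte_pktmbuf_free(pkts[i]);
}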
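Since nb_segs is 16 bits wide, the overflow check in rte_pktmbuf_chain() is now expressed against RTE_MBUF_MAX_NB_SEGS (UINT16_MAX) rather than the old 8-bit limit of 256 segments documented before this change. Callers still only need to handle -EOVERFLOW; append_segments() below is a hypothetical wrapper showing that.

#include <errno.h>
#include <rte_mbuf.h>

/* Hypothetical wrapper: chain 'tail' onto 'head', freeing 'tail' when the
 * combined segment count would exceed RTE_MBUF_MAX_NB_SEGS. */
static int
append_segments(struct rte_mbuf *head, struct rte_mbuf *tail)
{
	int ret = rte_pktmbuf_chain(head, tail);

	if (ret == -EOVERFLOW)
		rte_pktmbuf_free(tail);
	return ret;
}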