X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_mbuf%2Frte_mbuf.h;h=fb0849ac147365f6613fc2d533b633be0b3e310a;hb=c3a90c381daa68a0ea65906ad15bc96c247ac5cd;hp=d7162947519571df880070744e5372b3b76ac002;hpb=c277b34c1b3b787d4b89bb4d0663c1871effbe55;p=dpdk.git

diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index d716294751..fb0849ac14 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -40,6 +40,7 @@
 #include <rte_atomic.h>
 #include <rte_prefetch.h>
 #include <rte_branch_prediction.h>
+#include <rte_byteorder.h>
 #include <rte_mbuf_ptype.h>
 
 #ifdef __cplusplus
@@ -209,7 +210,7 @@ extern "C" {
 /**
  * Outer UDP checksum offload flag. This flag is used for enabling
  * outer UDP checksum in PMD. To use outer UDP checksum, the user needs to
- * 1) Enable the following in mbuff,
+ * 1) Enable the following in mbuf,
  *    a) Fill outer_l2_len and outer_l3_len in mbuf.
  *    b) Set the PKT_TX_OUTER_UDP_CKSUM flag.
  *    c) Set the PKT_TX_OUTER_IPV4 or PKT_TX_OUTER_IPV6 flag.
@@ -279,9 +280,11 @@ extern "C" {
 #define PKT_TX_TUNNEL_MASK	(0xFULL << 45)
 
 /**
- * Second VLAN insertion (QinQ) flag.
+ * Double VLAN insertion (QinQ) request to the driver; the driver may
+ * offload the insertion based on device capability.
+ * The mbuf 'vlan_tci' and 'vlan_tci_outer' fields must be valid when set.
  */
-#define PKT_TX_QINQ	(1ULL << 49)	/**< TX packet with double VLAN inserted. */
+#define PKT_TX_QINQ	(1ULL << 49)
 /* this old name is deprecated */
 #define PKT_TX_QINQ_PKT	PKT_TX_QINQ
@@ -337,7 +340,9 @@ extern "C" {
 #define PKT_TX_IPV6	(1ULL << 56)
 
 /**
- * TX packet is a 802.1q VLAN packet.
+ * VLAN tag insertion request to the driver; the driver may offload the
+ * insertion based on the device capability.
+ * The mbuf 'vlan_tci' field must be valid when this flag is set.
  */
 #define PKT_TX_VLAN	(1ULL << 57)
 /* this old name is deprecated */
@@ -479,6 +484,50 @@ struct rte_mbuf_sched {
 	uint16_t reserved;	/**< Reserved. */
 }; /**< Hierarchical scheduler */
 
+/**
+ * Enum for the tx_offload bit-field lengths and offsets.
+ * Defines the layout of the rte_mbuf tx_offload field.
+ */
+enum {
+	RTE_MBUF_L2_LEN_BITS = 7,
+	RTE_MBUF_L3_LEN_BITS = 9,
+	RTE_MBUF_L4_LEN_BITS = 8,
+	RTE_MBUF_TSO_SEGSZ_BITS = 16,
+	RTE_MBUF_OUTL3_LEN_BITS = 9,
+	RTE_MBUF_OUTL2_LEN_BITS = 7,
+	RTE_MBUF_TXOFLD_UNUSED_BITS = sizeof(uint64_t) * CHAR_BIT -
+		RTE_MBUF_L2_LEN_BITS -
+		RTE_MBUF_L3_LEN_BITS -
+		RTE_MBUF_L4_LEN_BITS -
+		RTE_MBUF_TSO_SEGSZ_BITS -
+		RTE_MBUF_OUTL3_LEN_BITS -
+		RTE_MBUF_OUTL2_LEN_BITS,
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+	RTE_MBUF_L2_LEN_OFS =
+		sizeof(uint64_t) * CHAR_BIT - RTE_MBUF_L2_LEN_BITS,
+	RTE_MBUF_L3_LEN_OFS = RTE_MBUF_L2_LEN_OFS - RTE_MBUF_L3_LEN_BITS,
+	RTE_MBUF_L4_LEN_OFS = RTE_MBUF_L3_LEN_OFS - RTE_MBUF_L4_LEN_BITS,
+	RTE_MBUF_TSO_SEGSZ_OFS = RTE_MBUF_L4_LEN_OFS - RTE_MBUF_TSO_SEGSZ_BITS,
+	RTE_MBUF_OUTL3_LEN_OFS =
+		RTE_MBUF_TSO_SEGSZ_OFS - RTE_MBUF_OUTL3_LEN_BITS,
+	RTE_MBUF_OUTL2_LEN_OFS =
+		RTE_MBUF_OUTL3_LEN_OFS - RTE_MBUF_OUTL2_LEN_BITS,
+	RTE_MBUF_TXOFLD_UNUSED_OFS =
+		RTE_MBUF_OUTL2_LEN_OFS - RTE_MBUF_TXOFLD_UNUSED_BITS,
+#else
+	RTE_MBUF_L2_LEN_OFS = 0,
+	RTE_MBUF_L3_LEN_OFS = RTE_MBUF_L2_LEN_OFS + RTE_MBUF_L2_LEN_BITS,
+	RTE_MBUF_L4_LEN_OFS = RTE_MBUF_L3_LEN_OFS + RTE_MBUF_L3_LEN_BITS,
+	RTE_MBUF_TSO_SEGSZ_OFS = RTE_MBUF_L4_LEN_OFS + RTE_MBUF_L4_LEN_BITS,
+	RTE_MBUF_OUTL3_LEN_OFS =
+		RTE_MBUF_TSO_SEGSZ_OFS + RTE_MBUF_TSO_SEGSZ_BITS,
+	RTE_MBUF_OUTL2_LEN_OFS =
+		RTE_MBUF_OUTL3_LEN_OFS + RTE_MBUF_OUTL3_LEN_BITS,
+	RTE_MBUF_TXOFLD_UNUSED_OFS =
+		RTE_MBUF_OUTL2_LEN_OFS + RTE_MBUF_OUTL2_LEN_BITS,
+#endif
+};
+
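The *_BITS values above sum to 64, and the *_OFS values place each field at the same position as the existing bit-fields on either byte order. A minimal sketch of composing a raw value from these constants (pack_inner_lens() is a hypothetical helper for illustration only; the patch adds rte_mbuf_tx_offload() further below for exactly this purpose):

	#include <stdint.h>
	#include <rte_mbuf.h>

	/* Hypothetical helper: pack the inner header lengths into a raw
	 * tx_offload value. The _OFS constants adapt to byte order, so the
	 * result matches what writing the individual bit-fields produces.
	 */
	static inline uint64_t
	pack_inner_lens(uint64_t l2_len, uint64_t l3_len, uint64_t l4_len)
	{
		return l2_len << RTE_MBUF_L2_LEN_OFS |
			l3_len << RTE_MBUF_L3_LEN_OFS |
			l4_len << RTE_MBUF_L4_LEN_OFS;
	}
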
 /**
  * The generic rte_mbuf, containing a packet mbuf.
  */
@@ -619,6 +668,8 @@ struct rte_mbuf {
 
 	/** Valid if PKT_RX_TIMESTAMP is set. The unit and time reference
 	 * are not normalized but are always the same for a given port.
+	 * Some devices allow querying rte_eth_read_clock() to get the
+	 * current device timestamp.
 	 */
 	uint64_t timestamp;
 
@@ -640,19 +691,35 @@ struct rte_mbuf {
 		uint64_t tx_offload;	/**< combined for easy fetch */
 		__extension__
 		struct {
-			uint64_t l2_len:7;
+			uint64_t l2_len:RTE_MBUF_L2_LEN_BITS;
 			/**< L2 (MAC) Header Length for non-tunneling pkt.
 			 * Outer_L4_len + ... + Inner_L2_len for tunneling pkt.
 			 */
-			uint64_t l3_len:9; /**< L3 (IP) Header Length. */
-			uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
-			uint64_t tso_segsz:16; /**< TCP TSO segment size */
-
-			/* fields for TX offloading of tunnels */
-			uint64_t outer_l3_len:9; /**< Outer L3 (IP) Hdr Length. */
-			uint64_t outer_l2_len:7; /**< Outer L2 (MAC) Hdr Length. */
+			uint64_t l3_len:RTE_MBUF_L3_LEN_BITS;
+			/**< L3 (IP) Header Length. */
+			uint64_t l4_len:RTE_MBUF_L4_LEN_BITS;
+			/**< L4 (TCP/UDP) Header Length. */
+			uint64_t tso_segsz:RTE_MBUF_TSO_SEGSZ_BITS;
+			/**< TCP TSO segment size */
+
+			/*
+			 * Fields for Tx offloading of tunnels.
+			 * These are undefined for packets which don't request
+			 * any tunnel offloads (outer IP or UDP checksum,
+			 * tunnel TSO).
+			 *
+			 * PMDs should not use these fields unconditionally
+			 * when calculating offsets.
+			 *
+			 * Applications are expected to set appropriate tunnel
+			 * offload flags when they fill in these fields.
+			 */
+			uint64_t outer_l3_len:RTE_MBUF_OUTL3_LEN_BITS;
+			/**< Outer L3 (IP) Hdr Length. */
+			uint64_t outer_l2_len:RTE_MBUF_OUTL2_LEN_BITS;
+			/**< Outer L2 (MAC) Hdr Length. */
 
-			/* uint64_t unused:8; */
+			/* uint64_t unused:RTE_MBUF_TXOFLD_UNUSED_BITS; */
 		};
 	};
 
@@ -810,7 +877,8 @@ rte_mbuf_from_indirect(struct rte_mbuf *mi)
  * @return
  *   The pointer of the mbuf buffer.
  */
-static inline char * __rte_experimental
+__rte_experimental
+static inline char *
 rte_mbuf_buf_addr(struct rte_mbuf *mb, struct rte_mempool *mp)
 {
 	return (char *)mb + sizeof(*mb) + rte_pktmbuf_priv_size(mp);
@@ -827,10 +895,18 @@ rte_mbuf_buf_addr(struct rte_mbuf *mb, struct rte_mempool *mp)
  * @return
  *   The pointer of the beginning of the mbuf data.
  */
-static inline char * __rte_experimental
-rte_mbuf_data_addr_default(struct rte_mbuf *mb)
+__rte_experimental
+static inline char *
+rte_mbuf_data_addr_default(__rte_unused struct rte_mbuf *mb)
 {
+	/* gcc complains about calling this experimental function even
+	 * when not using it. Hide it with ALLOW_EXPERIMENTAL_API.
+	 */
+#ifdef ALLOW_EXPERIMENTAL_API
 	return rte_mbuf_buf_addr(mb, mb->pool) + RTE_PKTMBUF_HEADROOM;
+#else
+	return NULL;
+#endif
 }
 
 /**
@@ -870,7 +946,8 @@ rte_mbuf_to_baddr(struct rte_mbuf *md)
  * @return
  *   The starting address of the private data area of the given mbuf.
  */
-static inline void * __rte_experimental
+__rte_experimental
+static inline void *
 rte_mbuf_to_priv(struct rte_mbuf *m)
 {
 	return RTE_PTR_ADD(m, sizeof(struct rte_mbuf));
@@ -973,7 +1050,7 @@ rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
 {
 	/*
 	 * The atomic_add is an expensive operation, so we don't want to
-	 * call it in the case where we know we are the uniq holder of
+	 * call it in the case where we know we are the unique holder of
 	 * this mbuf (i.e. ref_cnt == 1). Otherwise, an atomic
 	 * operation has to be used because concurrent accesses on the
 	 * reference counter can occur.
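The renamed bit-fields keep their existing widths, so application code that fills them is unchanged. A hedged sketch of a typical TSO request over IPv4/TCP (the header lengths are illustrative; a real application derives them from the parsed packet):

	/* Request TCP segmentation offload on an IPv4/TCP packet 'm'. */
	m->l2_len = 14;		/* Ethernet header, no VLAN */
	m->l3_len = 20;		/* IPv4 header without options */
	m->l4_len = 20;		/* TCP header without options */
	m->tso_segsz = 1460;	/* TCP payload bytes per emitted segment */
	m->ol_flags |= PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_IP_CKSUM;
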
@@ -1369,7 +1446,7 @@ static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
  * The given mbuf must have only one segment.
  *
  * @param m
- *   The packet mbuf to be resetted.
+ *   The packet mbuf to be reset.
  */
 #define MBUF_INVALID_PORT UINT16_MAX
 
@@ -1542,7 +1619,7 @@ rte_pktmbuf_ext_shinfo_init_helper(void *buf_addr, uint16_t *buf_len,
  * ``rte_pktmbuf_detach()``.
  *
  * Memory for shared data must be provided and user must initialize all of
- * the content properly, escpecially free callback and refcnt. The pointer
+ * the content properly, especially free callback and refcnt. The pointer
  * of shared data will be stored in m->shinfo.
  * ``rte_pktmbuf_ext_shinfo_init_helper`` can help to simply spare a few
  * bytes at the end of buffer for the shared data, store free callback and
@@ -1607,6 +1684,19 @@ rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr,
  */
 #define rte_pktmbuf_detach_extbuf(m) rte_pktmbuf_detach(m)
 
+/* internal */
+static inline void
+__rte_pktmbuf_copy_hdr(struct rte_mbuf *mdst, const struct rte_mbuf *msrc)
+{
+	mdst->port = msrc->port;
+	mdst->vlan_tci = msrc->vlan_tci;
+	mdst->vlan_tci_outer = msrc->vlan_tci_outer;
+	mdst->tx_offload = msrc->tx_offload;
+	mdst->hash = msrc->hash;
+	mdst->packet_type = msrc->packet_type;
+	mdst->timestamp = msrc->timestamp;
+}
+
 /**
  * Attach packet mbuf to another packet mbuf.
  *
@@ -1644,23 +1734,17 @@ static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
 		mi->ol_flags = m->ol_flags | IND_ATTACHED_MBUF;
 	}
 
-	mi->buf_iova = m->buf_iova;
-	mi->buf_addr = m->buf_addr;
-	mi->buf_len = m->buf_len;
+	__rte_pktmbuf_copy_hdr(mi, m);
 
 	mi->data_off = m->data_off;
 	mi->data_len = m->data_len;
 
-	mi->port = m->port;
-	mi->vlan_tci = m->vlan_tci;
-	mi->vlan_tci_outer = m->vlan_tci_outer;
-	mi->tx_offload = m->tx_offload;
-	mi->hash = m->hash;
+	mi->buf_iova = m->buf_iova;
+	mi->buf_addr = m->buf_addr;
+	mi->buf_len = m->buf_len;
 
 	mi->next = NULL;
 	mi->pkt_len = mi->data_len;
 	mi->nb_segs = 1;
-	mi->packet_type = m->packet_type;
-	mi->timestamp = m->timestamp;
 
 	__rte_mbuf_sanity_check(mi, 1);
 	__rte_mbuf_sanity_check(m, 0);
@@ -1831,7 +1915,7 @@ static inline void rte_pktmbuf_free(struct rte_mbuf *m)
 }
 
 /**
- * Creates a "clone" of the given packet mbuf.
+ * Create a "clone" of the given packet mbuf.
  *
  * Walks through all segments of the given packet mbuf, and for each of them:
  *  - Creates a new packet mbuf from the given pool.
@@ -1847,42 +1931,34 @@ static inline void rte_pktmbuf_free(struct rte_mbuf *m)
  *   - The pointer to the new "clone" mbuf on success.
  *   - NULL if allocation fails.
  */
-static inline struct rte_mbuf *rte_pktmbuf_clone(struct rte_mbuf *md,
-		struct rte_mempool *mp)
-{
-	struct rte_mbuf *mc, *mi, **prev;
-	uint32_t pktlen;
-	uint16_t nseg;
-
-	if (unlikely((mc = rte_pktmbuf_alloc(mp)) == NULL))
-		return NULL;
-
-	mi = mc;
-	prev = &mi->next;
-	pktlen = md->pkt_len;
-	nseg = 0;
-
-	do {
-		nseg++;
-		rte_pktmbuf_attach(mi, md);
-		*prev = mi;
-		prev = &mi->next;
-	} while ((md = md->next) != NULL &&
-		(mi = rte_pktmbuf_alloc(mp)) != NULL);
-
-	*prev = NULL;
-	mc->nb_segs = nseg;
-	mc->pkt_len = pktlen;
-
-	/* Allocation of new indirect segment failed */
-	if (unlikely(mi == NULL)) {
-		rte_pktmbuf_free(mc);
-		return NULL;
-	}
-
-	__rte_mbuf_sanity_check(mc, 1);
-	return mc;
-}
+struct rte_mbuf *
+rte_pktmbuf_clone(struct rte_mbuf *md, struct rte_mempool *mp);
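With the clone implementation moved out of line, callers keep the same contract: every segment of the source is attached to, not copied. A short usage sketch (pkt and clone_pool are hypothetical names):

	/* Zero-copy duplicate, e.g. for transmitting on a second port. */
	struct rte_mbuf *dup = rte_pktmbuf_clone(pkt, clone_pool);

	if (dup == NULL) {
		/* mempool exhausted; pkt is still owned by the caller */
		rte_pktmbuf_free(pkt);
	}
	/* dup shares pkt's data buffers; per-segment refcnts were bumped. */
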
+
+/**
+ * Create a full copy of a given packet mbuf.
+ *
+ * Copies all the data from a given packet mbuf to a newly allocated
+ * set of mbufs. The private data is not copied.
+ *
+ * @param m
+ *   The packet mbuf to be copied.
+ * @param mp
+ *   The mempool from which the "clone" mbufs are allocated.
+ * @param offset
+ *   The number of bytes to skip before copying.
+ *   If the mbuf does not have that many bytes, it is an error
+ *   and NULL is returned.
+ * @param length
+ *   The upper limit on bytes to copy. Passing UINT32_MAX
+ *   means all data (after offset).
+ * @return
+ *   - The pointer to the new "clone" mbuf on success.
+ *   - NULL if allocation fails.
+ */
+__rte_experimental
+struct rte_mbuf *
+rte_pktmbuf_copy(const struct rte_mbuf *m, struct rte_mempool *mp,
+		uint32_t offset, uint32_t length);
 
 /**
  * Adds given value to the refcnt of all packet mbuf segments.
@@ -2243,6 +2319,43 @@ static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail
 	return 0;
 }
 
+/**
+ * @warning
+ * @b EXPERIMENTAL: This API may change without prior notice.
+ *
+ * For given input values generate raw tx_offload value.
+ * Note that it is the caller's responsibility to make sure that input
+ * parameters don't exceed maximum bit-field values.
+ *
+ * @param il2
+ *   l2_len value.
+ * @param il3
+ *   l3_len value.
+ * @param il4
+ *   l4_len value.
+ * @param tso
+ *   tso_segsz value.
+ * @param ol3
+ *   outer_l3_len value.
+ * @param ol2
+ *   outer_l2_len value.
+ * @param unused
+ *   unused value.
+ * @return
+ *   raw tx_offload value.
+ */
+static __rte_always_inline uint64_t
+rte_mbuf_tx_offload(uint64_t il2, uint64_t il3, uint64_t il4, uint64_t tso,
+	uint64_t ol3, uint64_t ol2, uint64_t unused)
+{
+	return il2 << RTE_MBUF_L2_LEN_OFS |
+		il3 << RTE_MBUF_L3_LEN_OFS |
+		il4 << RTE_MBUF_L4_LEN_OFS |
+		tso << RTE_MBUF_TSO_SEGSZ_OFS |
+		ol3 << RTE_MBUF_OUTL3_LEN_OFS |
+		ol2 << RTE_MBUF_OUTL2_LEN_OFS |
+		unused << RTE_MBUF_TXOFLD_UNUSED_OFS;
+}
+
 /**
  * Validate general requirements for Tx offload in mbuf.
  *
@@ -2257,23 +2370,11 @@ static inline int
 rte_validate_tx_offload(const struct rte_mbuf *m)
 {
 	uint64_t ol_flags = m->ol_flags;
-	uint64_t inner_l3_offset = m->l2_len;
 
 	/* Does packet set any of available offloads? */
 	if (!(ol_flags & PKT_TX_OFFLOAD_MASK))
 		return 0;
 
-	if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
-		/* NB: elaborating the addition like this instead of using
-		 * += gives the result uint64_t type instead of int,
-		 * avoiding compiler warnings on gcc 8.1 at least */
-		inner_l3_offset = inner_l3_offset + m->outer_l2_len +
-				m->outer_l3_len;
-
-	/* Headers are fragmented */
-	if (rte_pktmbuf_data_len(m) < inner_l3_offset + m->l3_len + m->l4_len)
-		return -ENOTSUP;
-
 	/* IP checksum can be counted only for IPv4 packet */
 	if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6))
 		return -EINVAL;
@@ -2298,6 +2399,11 @@ rte_validate_tx_offload(const struct rte_mbuf *m)
 	return 0;
 }
 
+/**
+ * @internal used by rte_pktmbuf_linearize().
+ */
+int __rte_pktmbuf_linearize(struct rte_mbuf *mbuf);
+
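rte_mbuf_tx_offload() lets a driver or application build the whole tx_offload word in one store instead of several read-modify-write bit-field updates. A sketch using the same illustrative header lengths as above:

	/* Ethernet(14)/IPv4(20)/TCP(20), TSO at 1460 bytes, no tunnel. */
	m->tx_offload = rte_mbuf_tx_offload(14, 20, 20, 1460, 0, 0, 0);

	/* The relaxed checks above can still be applied before transmit. */
	if (rte_validate_tx_offload(m) != 0) {
		/* inconsistent request, e.g. PKT_TX_IP_CKSUM with PKT_TX_IPV6 */
		rte_pktmbuf_free(m);
	}
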
 /**
  * Linearize data in mbuf.
  *
@@ -2313,40 +2419,9 @@ rte_validate_tx_offload(const struct rte_mbuf *m)
 static inline int
 rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
 {
-	size_t seg_len, copy_len;
-	struct rte_mbuf *m;
-	struct rte_mbuf *m_next;
-	char *buffer;
-
 	if (rte_pktmbuf_is_contiguous(mbuf))
 		return 0;
-
-	/* Extend first segment to the total packet length */
-	copy_len = rte_pktmbuf_pkt_len(mbuf) - rte_pktmbuf_data_len(mbuf);
-
-	if (unlikely(copy_len > rte_pktmbuf_tailroom(mbuf)))
-		return -1;
-
-	buffer = rte_pktmbuf_mtod_offset(mbuf, char *, mbuf->data_len);
-	mbuf->data_len = (uint16_t)(mbuf->pkt_len);
-
-	/* Append data from next segments to the first one */
-	m = mbuf->next;
-	while (m != NULL) {
-		m_next = m->next;
-
-		seg_len = rte_pktmbuf_data_len(m);
-		rte_memcpy(buffer, rte_pktmbuf_mtod(m, char *), seg_len);
-		buffer += seg_len;
-
-		rte_pktmbuf_free_seg(m);
-		m = m_next;
-	}
-
-	mbuf->next = NULL;
-	mbuf->nb_segs = 1;
-
-	return 0;
+	return __rte_pktmbuf_linearize(mbuf);
 }
 
 /**
@@ -2464,6 +2539,7 @@ rte_mbuf_sched_set(struct rte_mbuf *m, uint32_t queue_id,
 		.queue_id = queue_id,
 		.traffic_class = traffic_class,
 		.color = color,
+		.reserved = 0,
 	};
 }
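The inline wrapper keeps the contiguous-packet fast path in the header while the copy loop moves into the library. Usage is unchanged (m is a hypothetical, possibly multi-segment mbuf):

	/* Coalesce a segmented packet before handing it to a driver that
	 * requires contiguous data.
	 */
	if (rte_pktmbuf_linearize(m) != 0) {
		/* first segment lacks tailroom to absorb the whole chain */
		rte_pktmbuf_free(m);
		m = NULL;
	}
	/* otherwise m now has nb_segs == 1 and contiguous data */
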