diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index d716294751..f7886dcb30 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -40,6 +40,7 @@
 #include <rte_atomic.h>
 #include <rte_prefetch.h>
 #include <rte_branch_prediction.h>
+#include <rte_byteorder.h>
 #include <rte_mbuf_ptype.h>
 
 #ifdef __cplusplus
@@ -479,6 +480,50 @@ struct rte_mbuf_sched {
 	uint16_t reserved;   /**< Reserved. */
 }; /**< Hierarchical scheduler */
 
+/**
+ * Enum for the tx_offload bit-field lengths and offsets.
+ * Defines the layout of the rte_mbuf tx_offload field.
+ */
+enum {
+	RTE_MBUF_L2_LEN_BITS = 7,
+	RTE_MBUF_L3_LEN_BITS = 9,
+	RTE_MBUF_L4_LEN_BITS = 8,
+	RTE_MBUF_TSO_SEGSZ_BITS = 16,
+	RTE_MBUF_OUTL3_LEN_BITS = 9,
+	RTE_MBUF_OUTL2_LEN_BITS = 7,
+	RTE_MBUF_TXOFLD_UNUSED_BITS = sizeof(uint64_t) * CHAR_BIT -
+		RTE_MBUF_L2_LEN_BITS -
+		RTE_MBUF_L3_LEN_BITS -
+		RTE_MBUF_L4_LEN_BITS -
+		RTE_MBUF_TSO_SEGSZ_BITS -
+		RTE_MBUF_OUTL3_LEN_BITS -
+		RTE_MBUF_OUTL2_LEN_BITS,
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+	RTE_MBUF_L2_LEN_OFS =
+		sizeof(uint64_t) * CHAR_BIT - RTE_MBUF_L2_LEN_BITS,
+	RTE_MBUF_L3_LEN_OFS = RTE_MBUF_L2_LEN_OFS - RTE_MBUF_L3_LEN_BITS,
+	RTE_MBUF_L4_LEN_OFS = RTE_MBUF_L3_LEN_OFS - RTE_MBUF_L4_LEN_BITS,
+	RTE_MBUF_TSO_SEGSZ_OFS = RTE_MBUF_L4_LEN_OFS - RTE_MBUF_TSO_SEGSZ_BITS,
+	RTE_MBUF_OUTL3_LEN_OFS =
+		RTE_MBUF_TSO_SEGSZ_OFS - RTE_MBUF_OUTL3_LEN_BITS,
+	RTE_MBUF_OUTL2_LEN_OFS =
+		RTE_MBUF_OUTL3_LEN_OFS - RTE_MBUF_OUTL2_LEN_BITS,
+	RTE_MBUF_TXOFLD_UNUSED_OFS =
+		RTE_MBUF_OUTL2_LEN_OFS - RTE_MBUF_TXOFLD_UNUSED_BITS,
+#else
+	RTE_MBUF_L2_LEN_OFS = 0,
+	RTE_MBUF_L3_LEN_OFS = RTE_MBUF_L2_LEN_OFS + RTE_MBUF_L2_LEN_BITS,
+	RTE_MBUF_L4_LEN_OFS = RTE_MBUF_L3_LEN_OFS + RTE_MBUF_L3_LEN_BITS,
+	RTE_MBUF_TSO_SEGSZ_OFS = RTE_MBUF_L4_LEN_OFS + RTE_MBUF_L4_LEN_BITS,
+	RTE_MBUF_OUTL3_LEN_OFS =
+		RTE_MBUF_TSO_SEGSZ_OFS + RTE_MBUF_TSO_SEGSZ_BITS,
+	RTE_MBUF_OUTL2_LEN_OFS =
+		RTE_MBUF_OUTL3_LEN_OFS + RTE_MBUF_OUTL3_LEN_BITS,
+	RTE_MBUF_TXOFLD_UNUSED_OFS =
+		RTE_MBUF_OUTL2_LEN_OFS + RTE_MBUF_OUTL2_LEN_BITS,
+#endif
+};
+
 /**
  * The generic rte_mbuf, containing a packet mbuf.
  */
@@ -640,19 +685,24 @@ struct rte_mbuf {
 		uint64_t tx_offload;       /**< combined for easy fetch */
 		__extension__
 		struct {
-			uint64_t l2_len:7;
+			uint64_t l2_len:RTE_MBUF_L2_LEN_BITS;
 			/**< L2 (MAC) Header Length for non-tunneling pkt.
 			 * Outer_L4_len + ... + Inner_L2_len for tunneling pkt.
 			 */
-			uint64_t l3_len:9; /**< L3 (IP) Header Length. */
-			uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
-			uint64_t tso_segsz:16; /**< TCP TSO segment size */
+			uint64_t l3_len:RTE_MBUF_L3_LEN_BITS;
+			/**< L3 (IP) Header Length. */
+			uint64_t l4_len:RTE_MBUF_L4_LEN_BITS;
+			/**< L4 (TCP/UDP) Header Length. */
+			uint64_t tso_segsz:RTE_MBUF_TSO_SEGSZ_BITS;
+			/**< TCP TSO segment size */
 
 			/* fields for TX offloading of tunnels */
-			uint64_t outer_l3_len:9; /**< Outer L3 (IP) Hdr Length. */
-			uint64_t outer_l2_len:7; /**< Outer L2 (MAC) Hdr Length. */
+			uint64_t outer_l3_len:RTE_MBUF_OUTL3_LEN_BITS;
+			/**< Outer L3 (IP) Hdr Length. */
+			uint64_t outer_l2_len:RTE_MBUF_OUTL2_LEN_BITS;
+			/**< Outer L2 (MAC) Hdr Length. */
 
-			/* uint64_t unused:8; */
+			/* uint64_t unused:RTE_MBUF_TXOFLD_UNUSED_BITS; */
 		};
 	};
 
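For reference (not part of the patch): the widths above sum to 7+9+8+16+9+7 = 56 bits, so RTE_MBUF_TXOFLD_UNUSED_BITS evaluates to 64 - 56 = 8 spare bits, and on a little-endian target the offsets come out to 0, 7, 16, 24, 40, 49 and 56. A compile-time check along these lines, illustrative only, confirms the enum covers the whole 64-bit tx_offload field:

	#include <stdint.h>
	#include <limits.h>
	#include <rte_mbuf.h>

	/* Illustrative sketch: verify the named bit-field widths cover
	 * all 64 bits of tx_offload, i.e. 7+9+8+16+9+7+8 == 64.
	 */
	_Static_assert(RTE_MBUF_L2_LEN_BITS + RTE_MBUF_L3_LEN_BITS +
		RTE_MBUF_L4_LEN_BITS + RTE_MBUF_TSO_SEGSZ_BITS +
		RTE_MBUF_OUTL3_LEN_BITS + RTE_MBUF_OUTL2_LEN_BITS +
		RTE_MBUF_TXOFLD_UNUSED_BITS == sizeof(uint64_t) * CHAR_BIT,
		"tx_offload bit-field widths must sum to 64");
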
@@ -973,7 +1023,7 @@ rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
 {
 	/*
 	 * The atomic_add is an expensive operation, so we don't want to
-	 * call it in the case where we know we are the uniq holder of
+	 * call it in the case where we know we are the unique holder of
 	 * this mbuf (i.e. ref_cnt == 1). Otherwise, an atomic
 	 * operation has to be used because concurrent accesses on the
 	 * reference counter can occur.
@@ -2243,6 +2293,43 @@ static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail
 	return 0;
 }
 
+/**
+ * @warning
+ * @b EXPERIMENTAL: This API may change without prior notice.
+ *
+ * For given input values generate raw tx_offload value.
+ * Note that it is the caller's responsibility to make sure that input
+ * parameters don't exceed the maximum bit-field values.
+ * @param il2
+ *   l2_len value.
+ * @param il3
+ *   l3_len value.
+ * @param il4
+ *   l4_len value.
+ * @param tso
+ *   tso_segsz value.
+ * @param ol3
+ *   outer_l3_len value.
+ * @param ol2
+ *   outer_l2_len value.
+ * @param unused
+ *   unused value.
+ * @return
+ *   raw tx_offload value.
+ */
+static __rte_always_inline uint64_t
+rte_mbuf_tx_offload(uint64_t il2, uint64_t il3, uint64_t il4, uint64_t tso,
+	uint64_t ol3, uint64_t ol2, uint64_t unused)
+{
+	return il2 << RTE_MBUF_L2_LEN_OFS |
+		il3 << RTE_MBUF_L3_LEN_OFS |
+		il4 << RTE_MBUF_L4_LEN_OFS |
+		tso << RTE_MBUF_TSO_SEGSZ_OFS |
+		ol3 << RTE_MBUF_OUTL3_LEN_OFS |
+		ol2 << RTE_MBUF_OUTL2_LEN_OFS |
+		unused << RTE_MBUF_TXOFLD_UNUSED_OFS;
+}
+
 /**
  * Validate general requirements for Tx offload in mbuf.
  *
@@ -2257,23 +2344,11 @@ static inline int
 rte_validate_tx_offload(const struct rte_mbuf *m)
 {
 	uint64_t ol_flags = m->ol_flags;
-	uint64_t inner_l3_offset = m->l2_len;
 
 	/* Does packet set any of available offloads? */
 	if (!(ol_flags & PKT_TX_OFFLOAD_MASK))
 		return 0;
 
-	if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
-		/* NB: elaborating the addition like this instead of using
-		 * += gives the result uint64_t type instead of int,
-		 * avoiding compiler warnings on gcc 8.1 at least */
-		inner_l3_offset = inner_l3_offset + m->outer_l2_len +
-				  m->outer_l3_len;
-
-	/* Headers are fragmented */
-	if (rte_pktmbuf_data_len(m) < inner_l3_offset + m->l3_len + m->l4_len)
-		return -ENOTSUP;
-
 	/* IP checksum can be counted only for IPv4 packet */
 	if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6))
 		return -EINVAL;
@@ -2464,6 +2539,7 @@ rte_mbuf_sched_set(struct rte_mbuf *m, uint32_t queue_id,
 		.queue_id = queue_id,
 		.traffic_class = traffic_class,
 		.color = color,
+		.reserved = 0,
 	};
 }
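
As a usage sketch (not part of the patch; the helper name mbuf_set_tso_offload is hypothetical, and 14/20/20 are assumed header lengths for an untunneled Ethernet/IPv4/TCP packet), the new rte_mbuf_tx_offload() lets a driver or application fill all tx_offload bit-fields with one 64-bit store instead of six separate bit-field writes:

	#include <rte_mbuf.h>

	/* Illustrative only: prepare an mbuf for TSO over plain
	 * Ethernet(14)/IPv4(20)/TCP(20); real code would take the
	 * lengths from the parsed headers.
	 */
	static inline void
	mbuf_set_tso_offload(struct rte_mbuf *m, uint16_t mss)
	{
		/* Equivalent to assigning m->l2_len, m->l3_len,
		 * m->l4_len and m->tso_segsz individually. The TX
		 * flags (e.g. PKT_TX_TCP_SEG) still have to be set
		 * separately in m->ol_flags.
		 */
		m->tx_offload = rte_mbuf_tx_offload(14, 20, 20, mss,
			0, 0, 0);
	}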