X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fenic%2Fenic_rxtx.c;h=6a8718c086719790c0b162216eb2e4685cd7aaec;hb=75a675d6d2842a2d47c03a55c50e8f7058902658;hp=8d57c418fc66bb1077724110209b2017a391c67c;hpb=828cf603a159edcd2dc0aab936dc9ff402b8ab11;p=dpdk.git

diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 8d57c418fc..6a8718c086 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -11,6 +11,7 @@
 #include "enic_compat.h"
 #include "rq_enet_desc.h"
 #include "enic.h"
+#include "enic_rxtx_common.h"
 #include <rte_ether.h>
 #include <rte_ip.h>
 #include <rte_tcp.h>
@@ -30,268 +31,6 @@
 #define rte_packet_prefetch(p) do {} while (0)
 #endif
 
-static inline uint16_t
-enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
-{
-	return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
-}
-
-static inline uint16_t
-enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
-{
-	return le16_to_cpu(crd->bytes_written_flags) &
-		~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
-}
-
-static inline uint8_t
-enic_cq_rx_desc_packet_error(uint16_t bwflags)
-{
-	return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
-		CQ_ENET_RQ_DESC_FLAGS_TRUNCATED;
-}
-
-static inline uint8_t
-enic_cq_rx_desc_eop(uint16_t ciflags)
-{
-	return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
-		== CQ_ENET_RQ_DESC_FLAGS_EOP;
-}
-
-static inline uint8_t
-enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
-{
-	return (le16_to_cpu(cqrd->q_number_rss_type_flags) &
-		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
-		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
-}
-
-static inline uint8_t
-enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
-{
-	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
-		CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;
-}
-
-static inline uint8_t
-enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
-{
-	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
-		CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;
-}
-
-static inline uint8_t
-enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
-{
-	return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
-		CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
-}
-
-static inline uint32_t
-enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
-{
-	return le32_to_cpu(cqrd->rss_hash);
-}
-
-static inline uint16_t
-enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
-{
-	return le16_to_cpu(cqrd->vlan);
-}
-
-static inline uint16_t
-enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
-{
-	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-	return le16_to_cpu(cqrd->bytes_written_flags) &
-		CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
-}
-
-
-static inline uint8_t
-enic_cq_rx_check_err(struct cq_desc *cqd)
-{
-	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-	uint16_t bwflags;
-
-	bwflags = enic_cq_rx_desc_bwflags(cqrd);
-	if (unlikely(enic_cq_rx_desc_packet_error(bwflags)))
-		return 1;
-	return 0;
-}
-
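
The helpers being moved out above all follow the same pattern: the hardware packs a byte count and several status flags into one little-endian 16-bit field, and each accessor isolates its bits with a mask, using the masked-compare idiom (x & FLAG) == FLAG so the result is 0 or 1. A minimal, self-contained sketch of that idiom; the mask values here are hypothetical stand-ins, not the real CQ_ENET_RQ_DESC_* constants:

	#include <stdint.h>
	#include <stdio.h>

	#define BYTES_WRITTEN_MASK 0x3fff	/* low bits: Rx byte count */
	#define FLAG_TRUNCATED     0x4000	/* frame did not fit the buffer */
	#define FLAG_EOP           0x8000	/* end of packet */

	int main(void)
	{
		uint16_t bytes_written_flags = 0x802a;	/* EOP set, 42 bytes */

		/* The same split enic_cq_rx_desc_n_bytes()/..._bwflags() do:
		 * mask off the flags to get the count, and vice versa. */
		uint16_t nbytes = bytes_written_flags & BYTES_WRITTEN_MASK;
		uint16_t flags = bytes_written_flags & ~BYTES_WRITTEN_MASK;

		printf("bytes=%d eop=%d trunc=%d\n", nbytes,
		       (flags & FLAG_EOP) == FLAG_EOP,
		       (flags & FLAG_TRUNCATED) == FLAG_TRUNCATED);
		return 0;
	}
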
-/* Lookup table to translate RX CQ flags to mbuf flags. */
-static inline uint32_t
-enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd, uint8_t tnl)
-{
-	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-	uint8_t cqrd_flags = cqrd->flags;
-	/*
-	 * Odd-numbered entries are for tunnel packets. All packet type info
-	 * applies to the inner packet, and there is no info on the outer
-	 * packet. The outer flags in these entries exist only to avoid
-	 * changing enic_cq_rx_to_pkt_flags(). They are cleared from mbuf
-	 * afterwards.
-	 *
-	 * Also, as there is no tunnel type info (VXLAN, NVGRE, or GENEVE), set
-	 * RTE_PTYPE_TUNNEL_GRENAT.
-	 */
-	static const uint32_t cq_type_table[128] __rte_cache_aligned = {
-		[0x00] = RTE_PTYPE_UNKNOWN,
-		[0x01] = RTE_PTYPE_UNKNOWN |
-			 RTE_PTYPE_TUNNEL_GRENAT |
-			 RTE_PTYPE_INNER_L2_ETHER,
-		[0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
-		[0x21] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
-			 RTE_PTYPE_TUNNEL_GRENAT |
-			 RTE_PTYPE_INNER_L2_ETHER |
-			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-			 RTE_PTYPE_INNER_L4_NONFRAG,
-		[0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
-		[0x23] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
-			 RTE_PTYPE_TUNNEL_GRENAT |
-			 RTE_PTYPE_INNER_L2_ETHER |
-			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-			 RTE_PTYPE_INNER_L4_UDP,
-		[0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
-		[0x25] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
-			 RTE_PTYPE_TUNNEL_GRENAT |
-			 RTE_PTYPE_INNER_L2_ETHER |
-			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-			 RTE_PTYPE_INNER_L4_TCP,
-		[0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
-		[0x61] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
-			 RTE_PTYPE_TUNNEL_GRENAT |
-			 RTE_PTYPE_INNER_L2_ETHER |
-			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-			 RTE_PTYPE_INNER_L4_FRAG,
-		[0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
-		[0x63] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
-			 RTE_PTYPE_TUNNEL_GRENAT |
-			 RTE_PTYPE_INNER_L2_ETHER |
-			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-			 RTE_PTYPE_INNER_L4_FRAG,
-		[0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
-		[0x65] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
-			 RTE_PTYPE_TUNNEL_GRENAT |
-			 RTE_PTYPE_INNER_L2_ETHER |
-			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-			 RTE_PTYPE_INNER_L4_FRAG,
-		[0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
-		[0x11] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
-			 RTE_PTYPE_TUNNEL_GRENAT |
-			 RTE_PTYPE_INNER_L2_ETHER |
-			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-			 RTE_PTYPE_INNER_L4_NONFRAG,
-		[0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
-		[0x13] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
-			 RTE_PTYPE_TUNNEL_GRENAT |
-			 RTE_PTYPE_INNER_L2_ETHER |
-			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-			 RTE_PTYPE_INNER_L4_UDP,
-		[0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
-		[0x15] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
-			 RTE_PTYPE_TUNNEL_GRENAT |
-			 RTE_PTYPE_INNER_L2_ETHER |
-			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-			 RTE_PTYPE_INNER_L4_TCP,
-		[0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
-		[0x51] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
-			 RTE_PTYPE_TUNNEL_GRENAT |
-			 RTE_PTYPE_INNER_L2_ETHER |
-			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-			 RTE_PTYPE_INNER_L4_FRAG,
-		[0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
-		[0x53] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
-			 RTE_PTYPE_TUNNEL_GRENAT |
-			 RTE_PTYPE_INNER_L2_ETHER |
-			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-			 RTE_PTYPE_INNER_L4_FRAG,
-		[0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
-		[0x55] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
-			 RTE_PTYPE_TUNNEL_GRENAT |
-			 RTE_PTYPE_INNER_L2_ETHER |
-			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-			 RTE_PTYPE_INNER_L4_FRAG,
-		/* All others reserved */
-	};
-	cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
-		| CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
-		| CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
-	return cq_type_table[cqrd_flags + tnl];
-}
-
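
The table above is indexed directly by the masked completion-flag bits, with the tunnel indicator folded in as the low bit, which is why every odd entry is the tunnel variant of the even entry before it. A small sketch of the index arithmetic; the F_* bit values below are inferred from the table indices themselves (0x22 = IPv4 + UDP, and so on), not taken from the hardware definitions:

	#include <stdint.h>
	#include <stdio.h>

	#define F_UDP       0x02
	#define F_TCP       0x04
	#define F_IPV6      0x10
	#define F_IPV4      0x20
	#define F_IPV4_FRAG 0x40

	int main(void)
	{
		/* Inner IPv4/UDP packet that arrived encapsulated (tnl = 1). */
		uint8_t cqrd_flags = F_IPV4 | F_UDP;
		uint8_t tnl = 1;

		/* 0x22 + 1 = 0x23: the entry that adds RTE_PTYPE_TUNNEL_GRENAT
		 * and the RTE_PTYPE_INNER_* types for the inner headers. */
		printf("index = 0x%02x\n", cqrd_flags + tnl);
		return 0;
	}
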
-static inline void
-enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
-{
-	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-	uint16_t bwflags, pkt_flags = 0, vlan_tci;
-	bwflags = enic_cq_rx_desc_bwflags(cqrd);
-	vlan_tci = enic_cq_rx_desc_vlan(cqrd);
-
-	/* VLAN STRIPPED flag. The L2 packet type updated here also */
-	if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
-		pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-		mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
-	} else {
-		if (vlan_tci != 0) {
-			pkt_flags |= PKT_RX_VLAN;
-			mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
-		} else {
-			mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
-		}
-	}
-	mbuf->vlan_tci = vlan_tci;
-
-	if ((cqd->type_color & CQ_DESC_TYPE_MASK) == CQ_DESC_TYPE_CLASSIFIER) {
-		struct cq_enet_rq_clsf_desc *clsf_cqd;
-		uint16_t filter_id;
-		clsf_cqd = (struct cq_enet_rq_clsf_desc *)cqd;
-		filter_id = clsf_cqd->filter_id;
-		if (filter_id) {
-			pkt_flags |= PKT_RX_FDIR;
-			if (filter_id != ENIC_MAGIC_FILTER_ID) {
-				mbuf->hash.fdir.hi = clsf_cqd->filter_id;
-				pkt_flags |= PKT_RX_FDIR_ID;
-			}
-		}
-	} else if (enic_cq_rx_desc_rss_type(cqrd)) {
-		/* RSS flag */
-		pkt_flags |= PKT_RX_RSS_HASH;
-		mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
-	}
-
-	/* checksum flags */
-	if (mbuf->packet_type & (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV6)) {
-		if (!enic_cq_rx_desc_csum_not_calc(cqrd)) {
-			uint32_t l4_flags;
-			l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;
-
-			/*
-			 * When overlay offload is enabled, the NIC may
-			 * set ipv4_csum_ok=1 if the inner packet is IPv6.
-			 * So, explicitly check for IPv4 before checking
-			 * ipv4_csum_ok.
-			 */
-			if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
-				if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
-					pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
-				else
-					pkt_flags |= PKT_RX_IP_CKSUM_BAD;
-			}
-
-			if (l4_flags == RTE_PTYPE_L4_UDP ||
-			    l4_flags == RTE_PTYPE_L4_TCP) {
-				if (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))
-					pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
-				else
-					pkt_flags |= PKT_RX_L4_CKSUM_BAD;
-			}
-		}
-	}
-
-	mbuf->ol_flags = pkt_flags;
-}
-
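
enic_cq_rx_to_pkt_flags(), also moving to enic_rxtx_common.h, is the producer side of the mbuf offload contract: it turns completion-descriptor bits into PKT_RX_* flags plus the vlan_tci, hash.rss, and hash.fdir.hi fields. A sketch of the consumer side, i.e. how an application might read those flags after rte_eth_rx_burst(); it assumes an already-configured and started port, and drops only bad-IP-checksum packets:

	#include <rte_ethdev.h>
	#include <rte_mbuf.h>

	static void
	poll_rx_once(uint16_t port_id, uint16_t queue_id)
	{
		struct rte_mbuf *pkts[32];
		uint16_t i, n;

		n = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
		for (i = 0; i < n; i++) {
			struct rte_mbuf *m = pkts[i];
			uint16_t vlan = 0;
			uint32_t rss = 0;

			if (m->ol_flags & PKT_RX_VLAN_STRIPPED)
				vlan = m->vlan_tci;	/* tag the NIC stripped */
			if (m->ol_flags & PKT_RX_RSS_HASH)
				rss = m->hash.rss;	/* NIC-computed hash */
			if ((m->ol_flags & PKT_RX_IP_CKSUM_MASK) ==
			    PKT_RX_IP_CKSUM_BAD) {
				rte_pktmbuf_free(m);	/* corrupt IP header */
				continue;
			}
			/* ... hand the packet off; placeholders: */
			(void)vlan;
			(void)rss;
			rte_pktmbuf_free(m);
		}
	}
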
 /* dummy receive function to replace actual function in
  * order to do safe reconfiguration operations.
  */
@@ -590,7 +329,8 @@ enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return rx - rx_pkts;
 }
 
-static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
+static inline void enic_free_wq_bufs(struct vnic_wq *wq,
+				     uint16_t completed_index)
 {
 	struct rte_mbuf *buf;
 	struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
@@ -632,7 +372,7 @@ static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
 unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
 {
-	u16 completed_index;
+	uint16_t completed_index;
 
 	completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;
@@ -654,11 +394,22 @@ uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	for (i = 0; i != nb_pkts; i++) {
 		m = tx_pkts[i];
-		if (unlikely(m->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
-			rte_errno = EINVAL;
-			return i;
-		}
 		ol_flags = m->ol_flags;
+		if (!(ol_flags & PKT_TX_TCP_SEG)) {
+			if (unlikely(m->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
+				rte_errno = EINVAL;
+				return i;
+			}
+		} else {
+			uint16_t header_len;
+
+			header_len = m->l2_len + m->l3_len + m->l4_len;
+			if (m->tso_segsz + header_len > ENIC_TX_MAX_PKT_SIZE) {
+				rte_errno = EINVAL;
+				return i;
+			}
+		}
+
 		if (ol_flags & wq->tx_offload_notsup_mask) {
 			rte_errno = ENOTSUP;
 			return i;
@@ -666,13 +417,13 @@ uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
 		ret = rte_validate_tx_offload(m);
 		if (ret != 0) {
-			rte_errno = ret;
+			rte_errno = -ret;
 			return i;
 		}
 #endif
 		ret = rte_net_intel_cksum_prepare(m);
 		if (ret != 0) {
-			rte_errno = ret;
+			rte_errno = -ret;
 			return i;
 		}
 	}
@@ -842,12 +593,33 @@ static void enqueue_simple_pkts(struct rte_mbuf **pkts,
 			       struct enic *enic)
 {
 	struct rte_mbuf *p;
+	uint16_t mss;
 
 	while (n) {
 		n--;
 		p = *pkts++;
 		desc->address = p->buf_iova + p->data_off;
 		desc->length = p->pkt_len;
+		/* VLAN insert */
+		desc->vlan_tag = p->vlan_tci;
+		desc->header_length_flags &=
+			((1 << WQ_ENET_FLAGS_EOP_SHIFT) |
+			 (1 << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT));
+		if (p->ol_flags & PKT_TX_VLAN) {
+			desc->header_length_flags |=
+				1 << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT;
+		}
+		/*
+		 * Checksum offload. We use WQ_ENET_OFFLOAD_MODE_CSUM, which
+		 * is 0, so no need to set offload_mode.
+		 */
+		mss = 0;
+		if (p->ol_flags & PKT_TX_IP_CKSUM)
+			mss |= ENIC_CALC_IP_CKSUM << WQ_ENET_MSS_SHIFT;
+		if (p->ol_flags & PKT_TX_L4_MASK)
+			mss |= ENIC_CALC_TCP_UDP_CKSUM << WQ_ENET_MSS_SHIFT;
+		desc->mss_loopback = mss;
+
 		/*
 		 * The app should not send oversized
 		 * packets. tx_pkt_prepare includes a check as
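
The enic_prep_pkts() change above tightens the Tx length check: a non-TSO packet is limited by its total pkt_len, while a TSO packet is acceptable as long as each wire segment (tso_segsz plus the L2/L3/L4 headers replicated onto every segment) fits within ENIC_TX_MAX_PKT_SIZE. A sketch of how a sender would flag a prebuilt TCP/IPv4 packet so it passes that check; the port/queue setup and the packet itself are assumed to exist, and the segment size is illustrative:

	#include <errno.h>
	#include <rte_errno.h>
	#include <rte_ethdev.h>
	#include <rte_ether.h>
	#include <rte_ip.h>
	#include <rte_tcp.h>

	static int
	send_with_tso(struct rte_mbuf *m, uint16_t port_id, uint16_t queue_id)
	{
		m->l2_len = sizeof(struct ether_hdr);
		m->l3_len = sizeof(struct ipv4_hdr);
		m->l4_len = sizeof(struct tcp_hdr);
		m->tso_segsz = 1400;	/* per-segment payload bytes */
		m->ol_flags |= PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_IP_CKSUM;

		/* tx_prepare runs the driver's prep callback (enic_prep_pkts
		 * here); on rejection the reason is left in rte_errno. */
		if (rte_eth_tx_prepare(port_id, queue_id, &m, 1) != 1)
			return -rte_errno;
		return rte_eth_tx_burst(port_id, queue_id, &m, 1) == 1 ?
			0 : -EIO;
	}
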