#include "enic_compat.h"
#include "rq_enet_desc.h"
#include "enic.h"
-#include "enic_vnic_wq.h"
#define RTE_PMD_USE_PREFETCH
struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
uint8_t cqrd_flags = cqrd->flags;
static const uint32_t cq_type_table[128] __rte_cache_aligned = {
- [32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
- [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
- | RTE_PTYPE_L4_UDP,
- [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
- | RTE_PTYPE_L4_TCP,
- [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
- | RTE_PTYPE_L4_FRAG,
- [16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
- [18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
- | RTE_PTYPE_L4_UDP,
- [20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
- | RTE_PTYPE_L4_TCP,
- [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
- | RTE_PTYPE_L4_FRAG,
+ /*
+  * Lookup table mapping a CQ descriptor flags value to an mbuf packet
+  * type.  Judging from the populated entries, the index bits appear to
+  * encode: 0x10 = IPv6, 0x20 = IPv4, 0x02 = UDP, 0x04 = TCP,
+  * 0x40 = fragment (e.g. 0x60 = fragmented IPv4).
+  * NOTE(review): confirm against the cq_enet_rq_desc flag definitions.
+  */
+ [0x00] = RTE_PTYPE_UNKNOWN,
+ [0x20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
+ | RTE_PTYPE_L4_NONFRAG,
+ [0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
+ | RTE_PTYPE_L4_UDP,
+ [0x24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
+ | RTE_PTYPE_L4_TCP,
+ [0x60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
+ | RTE_PTYPE_L4_FRAG,
+ [0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
+ | RTE_PTYPE_L4_UDP,
+ [0x64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
+ | RTE_PTYPE_L4_TCP,
+ [0x10] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
+ | RTE_PTYPE_L4_NONFRAG,
+ [0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
+ | RTE_PTYPE_L4_UDP,
+ [0x14] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
+ | RTE_PTYPE_L4_TCP,
+ [0x50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
+ | RTE_PTYPE_L4_FRAG,
+ [0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
+ | RTE_PTYPE_L4_UDP,
+ [0x54] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
+ | RTE_PTYPE_L4_TCP,
/* All others reserved */
};
cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
mbuf->ol_flags = pkt_flags;
}
-static inline uint32_t
-enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
-{
- uint32_t d = i0 + i1;
- RTE_ASSERT(i0 < n_descriptors);
- RTE_ASSERT(i1 < n_descriptors);
- d -= (d >= n_descriptors) ? n_descriptors : 0;
- return d;
-}
-
-
uint16_t
enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
return nb_rx;
}
-static void enic_wq_free_buf(struct vnic_wq *wq,
- __rte_unused struct cq_desc *cq_desc,
- struct vnic_wq_buf *buf,
- __rte_unused void *opaque)
-{
- enic_free_wq_buf(wq, buf);
-}
-
-static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
- __rte_unused u8 type, u16 q_number, u16 completed_index, void *opaque)
+/*
+ * Return completed Tx mbufs, from wq->tail_idx up to and including
+ * completed_index, to their mempools.  Consecutive mbufs sharing a
+ * mempool are batched into a single rte_mempool_put_bulk() call.
+ */
+static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
{
- struct enic *enic = vnic_dev_priv(vdev);
-
- vnic_wq_service(&enic->wq[q_number], cq_desc,
- completed_index, enic_wq_free_buf,
- opaque);
-
- return 0;
-}
-
-unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq)
-{
- unsigned int cq = enic_cq_wq(enic, wq->index);
+ struct vnic_wq_buf *buf;
+ struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
+ unsigned int nb_to_free, nb_free = 0, i;
+ struct rte_mempool *pool;
+ unsigned int tail_idx;
+ unsigned int desc_count = wq->ring.desc_count;
+
+ /* Number of completed descriptors, inclusive of completed_index. */
+ nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
+ + 1;
+ tail_idx = wq->tail_idx;
+ buf = &wq->bufs[tail_idx];
+ pool = ((struct rte_mbuf *)buf->mb)->pool;
+ for (i = 0; i < nb_to_free; i++) {
+ buf = &wq->bufs[tail_idx];
+ m = (struct rte_mbuf *)(buf->mb);
+ if (likely(m->pool == pool)) {
+ ENIC_ASSERT(nb_free < ENIC_MAX_WQ_DESCS);
+ free[nb_free++] = m;
+ } else {
+ /* Pool changed: flush the batch gathered so far and
+ * start a new batch for this mbuf's pool.
+ */
+ rte_mempool_put_bulk(pool, (void **)free, nb_free);
+ free[0] = m;
+ nb_free = 1;
+ pool = m->pool;
+ }
+ tail_idx = enic_ring_incr(desc_count, tail_idx);
+ buf->mb = NULL;
+ }
- /* Return the work done */
- return vnic_cq_service(&enic->cq[cq],
- -1 /*wq_work_to_do*/, enic_wq_service, NULL);
-}
+ /* Flush the final batch; nb_free >= 1 since nb_to_free >= 1. */
+ rte_mempool_put_bulk(pool, (void **)free, nb_free);
-void enic_post_wq_index(struct vnic_wq *wq)
-{
- enic_vnic_post_wq_index(wq);
+ wq->tail_idx = tail_idx;
+ wq->ring.desc_avail += nb_to_free;
}
-void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
- struct rte_mbuf *tx_pkt, unsigned short len,
- uint8_t sop, uint8_t eop, uint8_t cq_entry,
- uint16_t ol_flags, uint16_t vlan_tag)
+/*
+ * Reclaim transmitted buffers.  The completion index is read from the
+ * memory at wq->cqmsg_rz; buffers are freed only when that index has
+ * advanced since the previous cleanup.  Always returns 0.
+ */
+unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
{
- struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
- uint16_t mss = 0;
- uint8_t vlan_tag_insert = 0;
- uint64_t bus_addr = (dma_addr_t)
- (tx_pkt->buf_physaddr + tx_pkt->data_off);
+ u16 completed_index;
- if (sop) {
- if (ol_flags & PKT_TX_VLAN_PKT)
- vlan_tag_insert = 1;
+ /* Completed index lives in the low 16 bits of the message word. */
+ completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;
- if (enic->hw_ip_checksum) {
- if (ol_flags & PKT_TX_IP_CKSUM)
- mss |= ENIC_CALC_IP_CKSUM;
-
- if (ol_flags & PKT_TX_TCP_UDP_CKSUM)
- mss |= ENIC_CALC_TCP_UDP_CKSUM;
- }
+ if (wq->last_completed_index != completed_index) {
+ enic_free_wq_bufs(wq, completed_index);
+ wq->last_completed_index = completed_index;
}
-
- wq_enet_desc_enc(desc,
- bus_addr,
- len,
- mss,
- 0 /* header_length */,
- 0 /* offload_mode WQ_ENET_OFFLOAD_MODE_CSUM */,
- eop,
- cq_entry,
- 0 /* fcoe_encap */,
- vlan_tag_insert,
- vlan_tag,
- 0 /* loopback */);
-
- enic_vnic_post_wq(wq, (void *)tx_pkt, bus_addr, len,
- sop,
- 1 /*desc_skip_cnt*/,
- cq_entry,
- 0 /*compressed send*/,
- 0 /*wrid*/);
+ return 0;
}
-uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+/*
+ * Burst transmit.  Reclaims completed descriptors first, encodes one WQ
+ * descriptor per mbuf segment, and writes the posted index to hardware
+ * once at the end of the burst (see "post:" below).  Returns the number
+ * of packets actually queued.
+ */
+uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
uint16_t index;
- unsigned int frags;
- unsigned int pkt_len;
- unsigned int seg_len;
- unsigned int inc_len;
+ unsigned int pkt_len, data_len;
unsigned int nb_segs;
- struct rte_mbuf *tx_pkt, *next_tx_pkt;
+ struct rte_mbuf *tx_pkt;
struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
struct enic *enic = vnic_dev_priv(wq->vdev);
unsigned short vlan_id;
- unsigned short ol_flags;
- uint8_t last_seg, eop;
- unsigned int host_tx_descs = 0;
+ uint64_t ol_flags;
+ uint64_t ol_flags_mask;
+ unsigned int wq_desc_avail;
+ int head_idx;
+ struct vnic_wq_buf *buf;
+ unsigned int desc_count;
+ struct wq_enet_desc *descs, *desc_p, desc_tmp;
+ uint16_t mss;
+ uint8_t vlan_tag_insert;
+ uint8_t eop;
+ uint64_t bus_addr;
+
+ enic_cleanup_wq(enic, wq);
+ wq_desc_avail = vnic_wq_desc_avail(wq);
+ head_idx = wq->head_idx;
+ desc_count = wq->ring.desc_count;
+ ol_flags_mask = PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;
+
+ /* Cap the burst so the free[] array in cleanup cannot overflow. */
+ nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);
for (index = 0; index < nb_pkts; index++) {
tx_pkt = *tx_pkts++;
- inc_len = 0;
nb_segs = tx_pkt->nb_segs;
+ /* Out of descriptors: post what we have queued so far (if
+  * anything) and stop accepting packets.
+  */
if (nb_segs > wq_desc_avail) {
if (index > 0)
- enic_post_wq_index(wq);
-
- /* wq cleanup and try again */
- if (!enic_cleanup_wq(enic, wq) ||
- (nb_segs > vnic_wq_desc_avail(wq))) {
- return index;
- }
+ goto post;
+ goto done;
}
pkt_len = tx_pkt->pkt_len;
- vlan_id = tx_pkt->vlan_tci;
+ data_len = tx_pkt->data_len;
ol_flags = tx_pkt->ol_flags;
- for (frags = 0; inc_len < pkt_len; frags++) {
- if (!tx_pkt)
+ mss = 0;
+ vlan_id = 0;
+ vlan_tag_insert = 0;
+ bus_addr = (dma_addr_t)
+ (tx_pkt->buf_physaddr + tx_pkt->data_off);
+
+ descs = (struct wq_enet_desc *)wq->ring.descs;
+ desc_p = descs + head_idx;
+
+ /* Single-segment packet iff the first segment holds it all. */
+ eop = (data_len == pkt_len);
+
+ if (ol_flags & ol_flags_mask) {
+ if (ol_flags & PKT_TX_VLAN_PKT) {
+ vlan_tag_insert = 1;
+ vlan_id = tx_pkt->vlan_tci;
+ }
+
+ if (ol_flags & PKT_TX_IP_CKSUM)
+ mss |= ENIC_CALC_IP_CKSUM;
+
+ /* Nic uses just 1 bit for UDP and TCP */
+ switch (ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_TCP_CKSUM:
+ case PKT_TX_UDP_CKSUM:
+ mss |= ENIC_CALC_TCP_UDP_CKSUM;
break;
- next_tx_pkt = tx_pkt->next;
- seg_len = tx_pkt->data_len;
- inc_len += seg_len;
-
- host_tx_descs++;
- last_seg = 0;
- eop = 0;
- if ((pkt_len == inc_len) || !next_tx_pkt) {
- eop = 1;
- /* post if last packet in batch or > thresh */
- if ((index == (nb_pkts - 1)) ||
- (host_tx_descs > ENIC_TX_POST_THRESH)) {
- last_seg = 1;
- host_tx_descs = 0;
- }
}
- enic_send_pkt(enic, wq, tx_pkt, (unsigned short)seg_len,
- !frags, eop, last_seg, ol_flags, vlan_id);
- tx_pkt = next_tx_pkt;
+ }
+
+ /* Build descriptor in a local, then copy to the ring slot. */
+ wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, 0, 0, eop,
+ eop, 0, vlan_tag_insert, vlan_id, 0);
+
+ *desc_p = desc_tmp;
+ buf = &wq->bufs[head_idx];
+ buf->mb = (void *)tx_pkt;
+ head_idx = enic_ring_incr(desc_count, head_idx);
+ wq_desc_avail--;
+
+ /* Multi-segment packet: one descriptor per remaining segment,
+  * eop set only on the last one.
+  */
+ if (!eop) {
+ for (tx_pkt = tx_pkt->next; tx_pkt; tx_pkt =
+ tx_pkt->next) {
+ data_len = tx_pkt->data_len;
+
+ if (tx_pkt->next == NULL)
+ eop = 1;
+ desc_p = descs + head_idx;
+ bus_addr = (dma_addr_t)(tx_pkt->buf_physaddr
+ + tx_pkt->data_off);
+ wq_enet_desc_enc((struct wq_enet_desc *)
+ &desc_tmp, bus_addr, data_len,
+ mss, 0, 0, eop, eop, 0,
+ vlan_tag_insert, vlan_id, 0);
+
+ *desc_p = desc_tmp;
+ buf = &wq->bufs[head_idx];
+ buf->mb = (void *)tx_pkt;
+ head_idx = enic_ring_incr(desc_count, head_idx);
+ wq_desc_avail--;
+ }
}
}
+ /* Ensure descriptor writes are visible before notifying the NIC. */
+ post:
+ rte_wmb();
+ iowrite32(head_idx, &wq->ctrl->posted_index);
+ done:
+ wq->ring.desc_avail = wq_desc_avail;
+ wq->head_idx = head_idx;
- enic_cleanup_wq(enic, wq);
return index;
}
+
+