diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index eb5d0960f3..4de5d8968c 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -83,16 +83,16 @@
 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
 	(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
 
-static uint32_t rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
+static const uint32_t rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
 
-static inline int vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t* , uint8_t);
-static inline void vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *);
+static int vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t*, uint8_t);
+static void vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *);
 
 #ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED
 static void vmxnet3_rxq_dump(struct vmxnet3_rx_queue *);
 static void vmxnet3_txq_dump(struct vmxnet3_tx_queue *);
 #endif
 
-static inline struct rte_mbuf *
+static struct rte_mbuf *
 rte_rxmbuf_alloc(struct rte_mempool *mp)
 {
 	struct rte_mbuf *m;
@@ -156,7 +156,7 @@ vmxnet3_txq_dump(struct vmxnet3_tx_queue *txq)
 }
 #endif
 
-static inline void
+static void
 vmxnet3_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)
 {
 	while (ring->next2comp != ring->next2fill) {
@@ -295,7 +295,7 @@ vmxnet3_dev_clear_queues(struct rte_eth_dev *dev)
 	}
 }
 
-static inline void
+static void
 vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
 {
 	int completed = 0;
@@ -305,26 +305,21 @@ vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
 		(comp_ring->base + comp_ring->next2proc);
 
 	while (tcd->gen == comp_ring->gen) {
-		/* Release cmd_ring descriptor and free mbuf */
-#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
 		VMXNET3_ASSERT(txq->cmd_ring.base[tcd->txdIdx].txd.eop == 1);
-#endif
-		mbuf = txq->cmd_ring.buf_info[tcd->txdIdx].m;
-		if (unlikely(mbuf == NULL))
-			rte_panic("EOP desc does not point to a valid mbuf");
-		else
-			rte_pktmbuf_free(mbuf);
-
-
-		txq->cmd_ring.buf_info[tcd->txdIdx].m = NULL;
-		/* Mark the txd for which tcd was generated as completed */
-		vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
+		while (txq->cmd_ring.next2comp != tcd->txdIdx) {
+			mbuf = txq->cmd_ring.buf_info[txq->cmd_ring.next2comp].m;
+			txq->cmd_ring.buf_info[txq->cmd_ring.next2comp].m = NULL;
+			rte_pktmbuf_free_seg(mbuf);
+
+			/* Mark the txd for which tcd was generated as completed */
+			vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
+			completed++;
+		}
 
 		vmxnet3_comp_ring_adv_next2proc(comp_ring);
 		tcd = (struct Vmxnet3_TxCompDesc *)(comp_ring->base +
 						    comp_ring->next2proc);
-		completed++;
 	}
 
 	PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.", completed);
@@ -335,13 +330,8 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		  uint16_t nb_pkts)
 {
 	uint16_t nb_tx;
-	Vmxnet3_TxDesc *txd = NULL;
-	vmxnet3_buf_info_t *tbi = NULL;
-	struct vmxnet3_hw *hw;
-	struct rte_mbuf *txm;
 	vmxnet3_tx_queue_t *txq = tx_queue;
-
-	hw = txq->hw;
+	struct vmxnet3_hw *hw = txq->hw;
 
 	if (unlikely(txq->stopped)) {
 		PMD_TX_LOG(DEBUG, "Tx queue is stopped.");
@@ -353,75 +343,69 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	nb_tx = 0;
 	while (nb_tx < nb_pkts) {
+		Vmxnet3_GenericDesc *gdesc;
+		vmxnet3_buf_info_t *tbi;
+		uint32_t first2fill, avail, dw2;
+		struct rte_mbuf *txm = tx_pkts[nb_tx];
+		struct rte_mbuf *m_seg = txm;
+
+		/* Is this packet excessively fragmented? If so, drop it. */
+		if (unlikely(txm->nb_segs > VMXNET3_MAX_TXD_PER_PKT)) {
+			++txq->stats.drop_too_many_segs;
+			++txq->stats.drop_total;
+			rte_pktmbuf_free(txm);
+			++nb_tx;
+			continue;
+		}
 
-		if (vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring)) {
-			int copy_size = 0;
-
-			txm = tx_pkts[nb_tx];
-			/* Don't support scatter packets yet, free them if met */
-			if (txm->nb_segs != 1) {
-				PMD_TX_LOG(DEBUG, "Don't support scatter packets yet, drop!");
-				rte_pktmbuf_free(tx_pkts[nb_tx]);
-				txq->stats.drop_total++;
+		/* Is command ring full? */
+		avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
+		if (txm->nb_segs > avail) {
+			++txq->stats.tx_ring_full;
+			break;
+		}
 
-				nb_tx++;
-				continue;
-			}
+		/* use the previous gen bit for the SOP desc */
+		dw2 = (txq->cmd_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
+		first2fill = txq->cmd_ring.next2fill;
+		do {
+			/* Remember the transmit buffer for cleanup */
+			tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
+			tbi->m = m_seg;
 
-			txd = (Vmxnet3_TxDesc *)(txq->cmd_ring.base + txq->cmd_ring.next2fill);
-			if (rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) {
-				struct Vmxnet3_TxDataDesc *tdd;
+			/* NB: the following assumes that the VMXNET3 maximum
+			   transmit buffer size (16K) is greater than the
+			   maximum size of an mbuf segment. */
+			gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
+			gdesc->txd.addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+			gdesc->dword[2] = dw2 | m_seg->data_len;
+			gdesc->dword[3] = 0;
 
-				tdd = txq->data_ring.base + txq->cmd_ring.next2fill;
-				copy_size = rte_pktmbuf_pkt_len(txm);
-				rte_memcpy(tdd->data, rte_pktmbuf_mtod(txm, char *), copy_size);
-			}
+			/* move to the next2fill descriptor */
+			vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring);
 
-			/* Fill the tx descriptor */
-			tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
-			tbi->bufPA = RTE_MBUF_DATA_DMA_ADDR(txm);
-			if (copy_size)
-				txd->addr = rte_cpu_to_le_64(txq->data_ring.basePA +
-							     txq->cmd_ring.next2fill *
-							     sizeof(struct Vmxnet3_TxDataDesc));
-			else
-				txd->addr = tbi->bufPA;
-			txd->len = txm->data_len;
-
-			/* Mark the last descriptor as End of Packet. */
-			txd->cq = 1;
-			txd->eop = 1;
-
-			/* Add VLAN tag if requested */
-			if (txm->ol_flags & PKT_TX_VLAN_PKT) {
-				txd->ti = 1;
-				txd->tci = rte_cpu_to_le_16(txm->vlan_tci);
-			}
+			/* use the right gen for non-SOP desc */
+			dw2 = txq->cmd_ring.gen << VMXNET3_TXD_GEN_SHIFT;
+		} while ((m_seg = m_seg->next) != NULL);
 
-			/* Record current mbuf for freeing it later in tx complete */
-#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
-			VMXNET3_ASSERT(txm);
-#endif
-			tbi->m = txm;
+		/* Update the EOP descriptor */
+		gdesc->dword[3] |= VMXNET3_TXD_EOP | VMXNET3_TXD_CQ;
 
-			/* Set the offloading mode to default */
-			txd->hlen = 0;
-			txd->om = VMXNET3_OM_NONE;
-			txd->msscof = 0;
+		/* Add VLAN tag if present */
+		gdesc = txq->cmd_ring.base + first2fill;
+		if (txm->ol_flags & PKT_TX_VLAN_PKT) {
+			gdesc->txd.ti = 1;
+			gdesc->txd.tci = txm->vlan_tci;
+		}
 
-			/* finally flip the GEN bit of the SOP desc */
-			txd->gen = txq->cmd_ring.gen;
-			txq->shared->ctrl.txNumDeferred++;
+		/* TODO: Add transmit checksum offload here */
 
-			/* move to the next2fill descriptor */
-			vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring);
-			nb_tx++;
+		/* flip the GEN bit on the SOP */
+		rte_compiler_barrier();
+		gdesc->dword[2] ^= VMXNET3_TXD_GEN;
 
-		} else {
-			PMD_TX_LOG(DEBUG, "No free tx cmd desc(s)");
-			txq->stats.drop_total += (nb_pkts - nb_tx);
-			break;
-		}
+		txq->shared->ctrl.txNumDeferred++;
+		nb_tx++;
 	}
 
 	PMD_TX_LOG(DEBUG, "vmxnet3 txThreshold: %u", txq->shared->ctrl.txThreshold);
@@ -448,7 +432,7 @@
  *	only for LRO.
  *
  */
-static inline int
+static int
 vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
 {
 	int err = 0;
@@ -513,6 +497,43 @@
 	return i;
 }
 
+
+/* Receive side checksum and other offloads */
+static void
+vmxnet3_rx_offload(const Vmxnet3_RxCompDesc *rcd, struct rte_mbuf *rxm)
+{
+	/* Check for hardware stripped VLAN tag */
+	if (rcd->ts) {
+		rxm->ol_flags |= PKT_RX_VLAN_PKT;
+		rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
+	}
+
+	/* Check for RSS */
+	if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) {
+		rxm->ol_flags |= PKT_RX_RSS_HASH;
+		rxm->hash.rss = rcd->rssHash;
+	}
+
+	/* Check packet type, checksum errors, etc. Only support IPv4 for now. */
+	if (rcd->v4) {
+		struct ether_hdr *eth = rte_pktmbuf_mtod(rxm, struct ether_hdr *);
+		struct ipv4_hdr *ip = (struct ipv4_hdr *)(eth + 1);
+
+		if (((ip->version_ihl & 0xf) << 2) > (int)sizeof(struct ipv4_hdr))
+			rxm->packet_type = RTE_PTYPE_L3_IPV4_EXT;
+		else
+			rxm->packet_type = RTE_PTYPE_L3_IPV4;
+
+		if (!rcd->cnc) {
+			if (!rcd->ipc)
+				rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+
+			if ((rcd->tcp || rcd->udp) && !rcd->tuc)
+				rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+		}
+	}
+}
+
 /*
  * Process the Rx Completion Ring of given vmxnet3_rx_queue
  * for nb_pkts burst and return the number of packets received
@@ -562,16 +583,13 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.", idx, ring_idx);
 
-#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
 		VMXNET3_ASSERT(rcd->len <= rxd->len);
 		VMXNET3_ASSERT(rbi->m);
-#endif
+
 		if (unlikely(rcd->len == 0)) {
 			PMD_RX_LOG(DEBUG, "Rx buf was skipped. rxring[%d][%d]",
				   ring_idx, idx);
-#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
 			VMXNET3_ASSERT(rcd->sop && rcd->eop);
-#endif
 			rte_pktmbuf_free_seg(rbi->m);
 			goto rcd_done;
 		}
@@ -584,9 +602,8 @@
 				rte_pktmbuf_free_seg(rbi->m);
 				goto rcd_done;
 			}
-#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
 			VMXNET3_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD);
-#endif
+
 			/* Get the packet buffer pointer from buf_info */
 			rxm = rbi->m;
@@ -613,17 +630,6 @@
 				goto rcd_done;
 			}
 
-			/* Check for hardware stripped VLAN tag */
-			if (rcd->ts) {
-				PMD_RX_LOG(DEBUG, "Received packet with vlan ID: %d.",
-					   rcd->tci);
-				rxm->ol_flags = PKT_RX_VLAN_PKT;
-				/* Copy vlan tag in packet buffer */
-				rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
-			} else {
-				rxm->ol_flags = 0;
-				rxm->vlan_tci = 0;
-			}
 
 			/* Initialize newly received packet buffer */
 			rxm->port = rxq->port_id;
@@ -632,25 +638,10 @@
 			rxm->pkt_len = (uint16_t)rcd->len;
 			rxm->data_len = (uint16_t)rcd->len;
 			rxm->data_off = RTE_PKTMBUF_HEADROOM;
+			rxm->ol_flags = 0;
+			rxm->vlan_tci = 0;
 
-			/* Check packet type, checksum errors, etc. Only support IPv4 for now. */
-			if (rcd->v4) {
-				struct ether_hdr *eth = rte_pktmbuf_mtod(rxm, struct ether_hdr *);
-				struct ipv4_hdr *ip = (struct ipv4_hdr *)(eth + 1);
-
-				if (((ip->version_ihl & 0xf) << 2) > (int)sizeof(struct ipv4_hdr))
-					rxm->ol_flags |= PKT_RX_IPV4_HDR_EXT;
-				else
-					rxm->ol_flags |= PKT_RX_IPV4_HDR;
-
-				if (!rcd->cnc) {
-					if (!rcd->ipc)
-						rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
-
-					if ((rcd->tcp || rcd->udp) && !rcd->tuc)
-						rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
-				}
-			}
+			vmxnet3_rx_offload(rcd, rxm);
 
 			rx_pkts[nb_rx++] = rxm;
 rcd_done:
@@ -721,12 +712,6 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
 	PMD_INIT_FUNC_TRACE();
 
-	if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS) !=
-	    ETH_TXQ_FLAGS_NOMULTSEGS) {
-		PMD_INIT_LOG(ERR, "TX Multi segment not support yet");
-		return -EINVAL;
-	}
-
 	if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMS) !=
 	    ETH_TXQ_FLAGS_NOXSUMS) {
 		PMD_INIT_LOG(ERR, "TX no support for checksum offload yet");
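
Note on the descriptor hand-off that the rewritten transmit path above relies on: every vmxnet3 descriptor carries a generation ("gen") bit, and the device only consumes a descriptor once its gen bit matches the value the device currently expects. The new loop therefore writes the SOP (start-of-packet) descriptor with the inverted gen bit, fills in the remaining segment descriptors with the live gen value, and flips the SOP gen bit only after rte_compiler_barrier() has stopped the compiler from reordering the stores, so the device sees the whole multi-segment chain at once. Below is a minimal sketch of this pattern in plain C; the types and names (struct desc, struct ring, GEN_BIT, post_packet) are illustrative stand-ins rather than the driver's real structures, and the gen-bit position is arbitrary.

#include <stdint.h>

#define GEN_SHIFT 31
#define GEN_BIT   (1u << GEN_SHIFT)

/* Compiler-only barrier; DPDK's rte_compiler_barrier() plays this role,
 * which is sufficient on x86's strongly ordered memory model. */
#define barrier() __asm__ __volatile__("" : : : "memory")

struct desc {
	uint64_t addr;    /* DMA address of one buffer segment */
	uint32_t dword2;  /* gen bit | segment length */
	uint32_t dword3;  /* EOP/CQ flags live here on real hardware */
};

struct ring {
	struct desc *base;
	uint32_t size;
	uint32_t next2fill;  /* next slot the driver will fill */
	uint32_t gen;        /* driver's current generation value (0 or 1) */
};

/* Post one packet made up of nsegs buffer segments. */
static void
post_packet(struct ring *r, const uint64_t *seg_addr,
	    const uint32_t *seg_len, unsigned int nsegs)
{
	uint32_t sop = r->next2fill;
	/* The SOP descriptor initially carries the previous generation,
	 * so the device skips it while the rest of the chain is built. */
	uint32_t dw2 = (r->gen ^ 1u) << GEN_SHIFT;
	unsigned int i;

	for (i = 0; i < nsegs; i++) {
		struct desc *d = &r->base[r->next2fill];

		d->addr   = seg_addr[i];
		d->dword2 = dw2 | seg_len[i];
		d->dword3 = 0;

		/* Advance and wrap; flipping gen on wrap is what lets the
		 * device tell freshly written descriptors from stale ones. */
		if (++r->next2fill == r->size) {
			r->next2fill = 0;
			r->gen ^= 1u;
		}
		/* Every non-SOP descriptor uses the live generation. */
		dw2 = r->gen << GEN_SHIFT;
	}

	/* Keep the compiler from sinking any descriptor store below this
	 * point, then publish the whole chain with one gen-bit flip. */
	barrier();
	r->base[sop].dword2 ^= GEN_BIT;
}

Until that final XOR, every descriptor of the packet still carries the stale generation, so the device can never observe a half-written chain; the completion path applies the same convention when it polls for tcd->gen == comp_ring->gen.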