X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fvmxnet3%2Fvmxnet3_rxtx.c;h=31f396c58be8065d8a533bc3d85007cb148a3c33;hb=1daff5260ff8584b3ada836341662ceaf0ed1fba;hp=8a8e441140c95d25257bdc5b2e507365496baecc;hpb=1089b5066da13113a68ab714bce6052088cbbdc1;p=dpdk.git diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c index 8a8e441140..31f396c58b 100644 --- a/drivers/net/vmxnet3/vmxnet3_rxtx.c +++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c @@ -57,7 +57,6 @@ #include #include #include -#include #include #include #include @@ -86,16 +85,6 @@ static void vmxnet3_rxq_dump(struct vmxnet3_rx_queue *); static void vmxnet3_txq_dump(struct vmxnet3_tx_queue *); #endif -static struct rte_mbuf * -rte_rxmbuf_alloc(struct rte_mempool *mp) -{ - struct rte_mbuf *m; - - m = __rte_mbuf_raw_alloc(mp); - __rte_mbuf_sanity_check_raw(m, 0); - return m; -} - #ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED static void vmxnet3_rxq_dump(struct vmxnet3_rx_queue *rxq) @@ -106,7 +95,7 @@ vmxnet3_rxq_dump(struct vmxnet3_rx_queue *rxq) return; PMD_RX_LOG(DEBUG, - "RXQ: cmd0 base : 0x%p cmd1 base : 0x%p comp ring base : 0x%p.", + "RXQ: cmd0 base : %p cmd1 base : %p comp ring base : %p.", rxq->cmd_ring[0].base, rxq->cmd_ring[1].base, rxq->comp_ring.base); PMD_RX_LOG(DEBUG, "RXQ: cmd0 basePA : 0x%lx cmd1 basePA : 0x%lx comp ring basePA : 0x%lx.", @@ -136,7 +125,7 @@ vmxnet3_txq_dump(struct vmxnet3_tx_queue *txq) if (txq == NULL) return; - PMD_TX_LOG(DEBUG, "TXQ: cmd base : 0x%p comp ring base : 0x%p data ring base : 0x%p.", + PMD_TX_LOG(DEBUG, "TXQ: cmd base : %p comp ring base : %p data ring base : %p.", txq->cmd_ring.base, txq->comp_ring.base, txq->data_ring.base); PMD_TX_LOG(DEBUG, "TXQ: cmd basePA : 0x%lx comp ring basePA : 0x%lx data ring basePA : 0x%lx.", (unsigned long)txq->cmd_ring.basePA, @@ -175,7 +164,6 @@ vmxnet3_cmd_ring_release(vmxnet3_cmd_ring_t *ring) ring->buf_info = NULL; } - void vmxnet3_dev_tx_queue_release(void *txq) { @@ -289,27 +277,45 @@ vmxnet3_dev_clear_queues(struct rte_eth_dev *dev) } } +static int +vmxnet3_unmap_pkt(uint16_t eop_idx, vmxnet3_tx_queue_t *txq) +{ + int completed = 0; + struct rte_mbuf *mbuf; + + /* Release cmd_ring descriptor and free mbuf */ + RTE_ASSERT(txq->cmd_ring.base[eop_idx].txd.eop == 1); + + mbuf = txq->cmd_ring.buf_info[eop_idx].m; + if (mbuf == NULL) + rte_panic("EOP desc does not point to a valid mbuf"); + rte_pktmbuf_free(mbuf); + + txq->cmd_ring.buf_info[eop_idx].m = NULL; + + while (txq->cmd_ring.next2comp != eop_idx) { + /* no out-of-order completion */ + RTE_ASSERT(txq->cmd_ring.base[txq->cmd_ring.next2comp].txd.cq == 0); + vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring); + completed++; + } + + /* Mark the txd for which tcd was generated as completed */ + vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring); + + return completed + 1; +} + static void vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq) { int completed = 0; - struct rte_mbuf *mbuf; vmxnet3_comp_ring_t *comp_ring = &txq->comp_ring; struct Vmxnet3_TxCompDesc *tcd = (struct Vmxnet3_TxCompDesc *) (comp_ring->base + comp_ring->next2proc); while (tcd->gen == comp_ring->gen) { - /* Release cmd_ring descriptor and free mbuf */ - VMXNET3_ASSERT(txq->cmd_ring.base[tcd->txdIdx].txd.eop == 1); - while (txq->cmd_ring.next2comp != tcd->txdIdx) { - mbuf = txq->cmd_ring.buf_info[txq->cmd_ring.next2comp].m; - txq->cmd_ring.buf_info[txq->cmd_ring.next2comp].m = NULL; - rte_pktmbuf_free_seg(mbuf); - - /* Mark the txd for which tcd was generated as completed */ - 
vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring); - completed++; - } + completed += vmxnet3_unmap_pkt(tcd->txdIdx, txq); vmxnet3_comp_ring_adv_next2proc(comp_ring); tcd = (struct Vmxnet3_TxCompDesc *)(comp_ring->base + @@ -326,6 +332,8 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_tx; vmxnet3_tx_queue_t *txq = tx_queue; struct vmxnet3_hw *hw = txq->hw; + Vmxnet3_TxQueueCtrl *txq_ctrl = &txq->shared->ctrl; + uint32_t deferred = rte_le_to_cpu_32(txq_ctrl->txNumDeferred); if (unlikely(txq->stopped)) { PMD_TX_LOG(DEBUG, "Tx queue is stopped."); @@ -342,21 +350,53 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint32_t first2fill, avail, dw2; struct rte_mbuf *txm = tx_pkts[nb_tx]; struct rte_mbuf *m_seg = txm; + int copy_size = 0; + bool tso = (txm->ol_flags & PKT_TX_TCP_SEG) != 0; + /* # of descriptors needed for a packet. */ + unsigned count = txm->nb_segs; - /* Is this packet execessively fragmented, then drop */ - if (unlikely(txm->nb_segs > VMXNET3_MAX_TXD_PER_PKT)) { - ++txq->stats.drop_too_many_segs; - ++txq->stats.drop_total; + avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring); + if (count > avail) { + /* Is command ring full? */ + if (unlikely(avail == 0)) { + PMD_TX_LOG(DEBUG, "No free ring descriptors"); + txq->stats.tx_ring_full++; + txq->stats.drop_total += (nb_pkts - nb_tx); + break; + } + + /* Command ring is not full but cannot handle the + * multi-segmented packet. Let's try the next packet + * in this case. + */ + PMD_TX_LOG(DEBUG, "Running out of ring descriptors " + "(avail %d needed %d)", avail, count); + txq->stats.drop_total++; + if (tso) + txq->stats.drop_tso++; rte_pktmbuf_free(txm); - ++nb_tx; + nb_tx++; continue; } - /* Is command ring full? */ - avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring); - if (txm->nb_segs > avail) { - ++txq->stats.tx_ring_full; - break; + /* Drop non-TSO packet that is excessively fragmented */ + if (unlikely(!tso && count > VMXNET3_MAX_TXD_PER_PKT)) { + PMD_TX_LOG(ERR, "Non-TSO packet cannot occupy more than %d tx " + "descriptors. Packet dropped.", VMXNET3_MAX_TXD_PER_PKT); + txq->stats.drop_too_many_segs++; + txq->stats.drop_total++; + rte_pktmbuf_free(txm); + nb_tx++; + continue; + } + + if (txm->nb_segs == 1 && + rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) { + struct Vmxnet3_TxDataDesc *tdd; + + tdd = txq->data_ring.base + txq->cmd_ring.next2fill; + copy_size = rte_pktmbuf_pkt_len(txm); + rte_memcpy(tdd->data, rte_pktmbuf_mtod(txm, char *), copy_size); } /* use the previous gen bit for the SOP desc */ @@ -365,13 +405,19 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, do { /* Remember the transmit buffer for cleanup */ tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill; - tbi->m = m_seg; /* NB: the following assumes that VMXNET3 maximum - transmit buffer size (16K) is greater than - maximum sizeof mbuf segment size. */ + * transmit buffer size (16K) is greater than + * maximum size of mbuf segment size. 
+ */ gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill; - gdesc->txd.addr = rte_mbuf_data_dma_addr(m_seg); + if (copy_size) + gdesc->txd.addr = rte_cpu_to_le_64(txq->data_ring.basePA + + txq->cmd_ring.next2fill * + sizeof(struct Vmxnet3_TxDataDesc)); + else + gdesc->txd.addr = rte_mbuf_data_dma_addr(m_seg); + gdesc->dword[2] = dw2 | m_seg->data_len; gdesc->dword[3] = 0; @@ -382,6 +428,8 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, dw2 = txq->cmd_ring.gen << VMXNET3_TXD_GEN_SHIFT; } while ((m_seg = m_seg->next) != NULL); + /* set the last buf_info for the pkt */ + tbi->m = txm; /* Update the EOP descriptor */ gdesc->dword[3] |= VMXNET3_TXD_EOP | VMXNET3_TXD_CQ; @@ -392,21 +440,52 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, gdesc->txd.tci = txm->vlan_tci; } - /* TODO: Add transmit checksum offload here */ + if (tso) { + uint16_t mss = txm->tso_segsz; + + RTE_ASSERT(mss > 0); + + gdesc->txd.hlen = txm->l2_len + txm->l3_len + txm->l4_len; + gdesc->txd.om = VMXNET3_OM_TSO; + gdesc->txd.msscof = mss; + + deferred += (rte_pktmbuf_pkt_len(txm) - gdesc->txd.hlen + mss - 1) / mss; + } else if (txm->ol_flags & PKT_TX_L4_MASK) { + gdesc->txd.om = VMXNET3_OM_CSUM; + gdesc->txd.hlen = txm->l2_len + txm->l3_len; + + switch (txm->ol_flags & PKT_TX_L4_MASK) { + case PKT_TX_TCP_CKSUM: + gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct tcp_hdr, cksum); + break; + case PKT_TX_UDP_CKSUM: + gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct udp_hdr, dgram_cksum); + break; + default: + PMD_TX_LOG(WARNING, "requested cksum offload not supported %#llx", + txm->ol_flags & PKT_TX_L4_MASK); + abort(); + } + deferred++; + } else { + gdesc->txd.hlen = 0; + gdesc->txd.om = VMXNET3_OM_NONE; + gdesc->txd.msscof = 0; + deferred++; + } /* flip the GEN bit on the SOP */ rte_compiler_barrier(); gdesc->dword[2] ^= VMXNET3_TXD_GEN; - txq->shared->ctrl.txNumDeferred++; + txq_ctrl->txNumDeferred = rte_cpu_to_le_32(deferred); nb_tx++; } - PMD_TX_LOG(DEBUG, "vmxnet3 txThreshold: %u", txq->shared->ctrl.txThreshold); - - if (txq->shared->ctrl.txNumDeferred >= txq->shared->ctrl.txThreshold) { + PMD_TX_LOG(DEBUG, "vmxnet3 txThreshold: %u", rte_le_to_cpu_32(txq_ctrl->txThreshold)); - txq->shared->ctrl.txNumDeferred = 0; + if (deferred >= rte_le_to_cpu_32(txq_ctrl->txThreshold)) { + txq_ctrl->txNumDeferred = 0; /* Notify vSwitch that packets are available. */ VMXNET3_WRITE_BAR0_REG(hw, (VMXNET3_REG_TXPROD + txq->queue_id * VMXNET3_REG_ALIGN), txq->cmd_ring.next2fill); @@ -418,13 +497,12 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, /* * Allocates mbufs and clusters. Post rx descriptors with buffer details * so that device can receive packets in those buffers. - * Ring layout: - * Among the two rings, 1st ring contains buffers of type 0 and type1. + * Ring layout: + * Among the two rings, 1st ring contains buffers of type 0 and type 1. * bufs_per_pkt is set such that for non-LRO cases all the buffers required * by a frame will fit in 1st ring (1st buf of type0 and rest of type1). * 2nd ring contains buffers of type 1 alone. Second ring mostly be used * only for LRO. 
- * */ static int vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id) @@ -454,7 +532,7 @@ vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id) rxd = (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill); /* Allocate blank mbuf for the current Rx Descriptor */ - mbuf = rte_rxmbuf_alloc(rxq->mp); + mbuf = rte_mbuf_raw_alloc(rxq->mp); if (unlikely(mbuf == NULL)) { PMD_RX_LOG(ERR, "Error allocating mbuf"); rxq->stats.rx_buf_alloc_failure++; @@ -469,8 +547,7 @@ vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id) buf_info->m = mbuf; buf_info->len = (uint16_t)(mbuf->buf_len - RTE_PKTMBUF_HEADROOM); - buf_info->bufPA = - rte_mbuf_data_dma_addr_default(mbuf); + buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf); /* Load Rx Descriptor with the buffer's GPA */ rxd->addr = buf_info->bufPA; @@ -497,12 +574,6 @@ vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id) static void vmxnet3_rx_offload(const Vmxnet3_RxCompDesc *rcd, struct rte_mbuf *rxm) { - /* Check for hardware stripped VLAN tag */ - if (rcd->ts) { - rxm->ol_flags |= PKT_RX_VLAN_PKT; - rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci); - } - /* Check for RSS */ if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) { rxm->ol_flags |= PKT_RX_RSS_HASH; @@ -568,36 +639,13 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) idx = rcd->rxdIdx; ring_idx = (uint8_t)((rcd->rqID == rxq->qid1) ? 0 : 1); rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx; + RTE_SET_USED(rxd); /* used only for assert when enabled */ rbi = rxq->cmd_ring[ring_idx].buf_info + idx; - if (unlikely(rcd->sop != 1 || rcd->eop != 1)) { - rte_pktmbuf_free_seg(rbi->m); - PMD_RX_LOG(DEBUG, "Packet spread across multiple buffers\n)"); - goto rcd_done; - } - PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.", idx, ring_idx); - VMXNET3_ASSERT(rcd->len <= rxd->len); - VMXNET3_ASSERT(rbi->m); - - if (unlikely(rcd->len == 0)) { - PMD_RX_LOG(DEBUG, "Rx buf was skipped. rxring[%d][%d]\n)", - ring_idx, idx); - VMXNET3_ASSERT(rcd->sop && rcd->eop); - rte_pktmbuf_free_seg(rbi->m); - goto rcd_done; - } - - /* Assuming a packet is coming in a single packet buffer */ - if (unlikely(rxd->btype != VMXNET3_RXD_BTYPE_HEAD)) { - PMD_RX_LOG(DEBUG, - "Alert : Misbehaving device, incorrect " - " buffer type used. Packet dropped."); - rte_pktmbuf_free_seg(rbi->m); - goto rcd_done; - } - VMXNET3_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD); + RTE_ASSERT(rcd->len <= rxd->len); + RTE_ASSERT(rbi->m); /* Get the packet buffer pointer from buf_info */ rxm = rbi->m; @@ -610,7 +658,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) rxq->cmd_ring[ring_idx].next2comp = idx; /* For RCD with EOP set, check if there is frame error */ - if (unlikely(rcd->err)) { + if (unlikely(rcd->eop && rcd->err)) { rxq->stats.drop_total++; rxq->stats.drop_err++; @@ -625,7 +673,6 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) goto rcd_done; } - /* Initialize newly received packet buffer */ rxm->port = rxq->port_id; rxm->nb_segs = 1; @@ -636,12 +683,57 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) rxm->ol_flags = 0; rxm->vlan_tci = 0; - vmxnet3_rx_offload(rcd, rxm); + /* + * If this is the first buffer of the received packet, + * set the pointer to the first mbuf of the packet + * Otherwise, update the total length and the number of segments + * of the current scattered packet, and update the pointer to + * the last mbuf of the current packet. 
+ */ + if (rcd->sop) { + RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD); + + if (unlikely(rcd->len == 0)) { + RTE_ASSERT(rcd->eop); + + PMD_RX_LOG(DEBUG, + "Rx buf was skipped. rxring[%d][%d])", + ring_idx, idx); + rte_pktmbuf_free_seg(rxm); + goto rcd_done; + } + + rxq->start_seg = rxm; + vmxnet3_rx_offload(rcd, rxm); + } else { + struct rte_mbuf *start = rxq->start_seg; + + RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY); + + start->pkt_len += rxm->data_len; + start->nb_segs++; + + rxq->last_seg->next = rxm; + } + rxq->last_seg = rxm; + + if (rcd->eop) { + struct rte_mbuf *start = rxq->start_seg; + + /* Check for hardware stripped VLAN tag */ + if (rcd->ts) { + start->ol_flags |= (PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED); + start->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci); + } + + rx_pkts[nb_rx++] = start; + rxq->start_seg = NULL; + } - rx_pkts[nb_rx++] = rxm; rcd_done: rxq->cmd_ring[ring_idx].next2comp = idx; - VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp, rxq->cmd_ring[ring_idx].size); + VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp, + rxq->cmd_ring[ring_idx].size); /* It's time to allocate some new buf and renew descriptors */ vmxnet3_post_rx_bufs(rxq, ring_idx); @@ -656,8 +748,7 @@ rcd_done: rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd; nb_rxd++; if (nb_rxd > rxq->cmd_ring[0].size) { - PMD_RX_LOG(ERR, - "Used up quota of receiving packets," + PMD_RX_LOG(ERR, "Used up quota of receiving packets," " relinquish control."); break; } @@ -679,15 +770,15 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name, const struct rte_memzone *mz; snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d", - dev->driver->pci_drv.name, ring_name, - dev->data->port_id, queue_id); + dev->driver->pci_drv.driver.name, ring_name, + dev->data->port_id, queue_id); mz = rte_memzone_lookup(z_name); if (mz) return mz; return rte_memzone_reserve_aligned(z_name, ring_size, - socket_id, 0, VMXNET3_RING_BA_ALIGN); + socket_id, 0, VMXNET3_RING_BA_ALIGN); } int @@ -695,7 +786,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t nb_desc, unsigned int socket_id, - __attribute__((unused)) const struct rte_eth_txconf *tx_conf) + __rte_unused const struct rte_eth_txconf *tx_conf) { struct vmxnet3_hw *hw = dev->data->dev_private; const struct rte_memzone *mz; @@ -707,13 +798,14 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev, PMD_INIT_FUNC_TRACE(); - if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMS) != - ETH_TXQ_FLAGS_NOXSUMS) { - PMD_INIT_LOG(ERR, "TX no support for checksum offload yet"); + if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP) != + ETH_TXQ_FLAGS_NOXSUMSCTP) { + PMD_INIT_LOG(ERR, "SCTP checksum offload not supported"); return -EINVAL; } - txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue), RTE_CACHE_LINE_SIZE); + txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue), + RTE_CACHE_LINE_SIZE); if (txq == NULL) { PMD_INIT_LOG(ERR, "Can not allocate tx queue structure"); return -ENOMEM; @@ -796,32 +888,22 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t nb_desc, unsigned int socket_id, - __attribute__((unused)) const struct rte_eth_rxconf *rx_conf, + __rte_unused const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp) { const struct rte_memzone *mz; struct vmxnet3_rx_queue *rxq; - struct vmxnet3_hw *hw = dev->data->dev_private; + struct vmxnet3_hw *hw = dev->data->dev_private; struct vmxnet3_cmd_ring *ring0, *ring1, *ring; struct vmxnet3_comp_ring 
*comp_ring; int size; uint8_t i; char mem_name[32]; - uint16_t buf_size; PMD_INIT_FUNC_TRACE(); - buf_size = rte_pktmbuf_data_room_size(mp) - - RTE_PKTMBUF_HEADROOM; - - if (dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size) { - PMD_INIT_LOG(ERR, "buf_size = %u, max_pkt_len = %u, " - "VMXNET3 don't support scatter packets yet", - buf_size, dev->data->dev_conf.rxmode.max_rx_pkt_len); - return -EINVAL; - } - - rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue), RTE_CACHE_LINE_SIZE); + rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue), + RTE_CACHE_LINE_SIZE); if (rxq == NULL) { PMD_INIT_LOG(ERR, "Can not allocate rx queue structure"); return -ENOMEM; @@ -895,7 +977,9 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev, ring->rid = i; snprintf(mem_name, sizeof(mem_name), "rx_ring_%d_buf_info", i); - ring->buf_info = rte_zmalloc(mem_name, ring->size * sizeof(vmxnet3_buf_info_t), RTE_CACHE_LINE_SIZE); + ring->buf_info = rte_zmalloc(mem_name, + ring->size * sizeof(vmxnet3_buf_info_t), + RTE_CACHE_LINE_SIZE); if (ring->buf_info == NULL) { PMD_INIT_LOG(ERR, "ERROR: Creating rx_buf_info structure"); return -ENOMEM; @@ -929,16 +1013,22 @@ vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev) /* Passing 0 as alloc_num will allocate full ring */ ret = vmxnet3_post_rx_bufs(rxq, j); if (ret <= 0) { - PMD_INIT_LOG(ERR, "ERROR: Posting Rxq: %d buffers ring: %d", i, j); + PMD_INIT_LOG(ERR, + "ERROR: Posting Rxq: %d buffers ring: %d", + i, j); return -ret; } - /* Updating device with the index:next2fill to fill the mbufs for coming packets */ + /* + * Updating device with the index:next2fill to fill the + * mbufs for coming packets. + */ if (unlikely(rxq->shared->ctrl.updateRxProd)) { VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[j] + (rxq->queue_id * VMXNET3_REG_ALIGN), rxq->cmd_ring[j].next2fill); } } rxq->stopped = FALSE; + rxq->start_seg = NULL; } for (i = 0; i < dev->data->nb_tx_queues; i++) { @@ -979,7 +1069,7 @@ vmxnet3_rss_configure(struct rte_eth_dev *dev) dev_rss_conf->hashFunc = VMXNET3_RSS_HASH_FUNC_TOEPLITZ; /* loading hashKeySize */ dev_rss_conf->hashKeySize = VMXNET3_RSS_MAX_KEY_SIZE; - /* loading indTableSize : Must not exceed VMXNET3_RSS_MAX_IND_TABLE_SIZE (128)*/ + /* loading indTableSize: Must not exceed VMXNET3_RSS_MAX_IND_TABLE_SIZE (128)*/ dev_rss_conf->indTableSize = (uint16_t)(hw->num_rx_queues * 4); if (port_rss_conf->rss_key == NULL) { @@ -988,7 +1078,8 @@ vmxnet3_rss_configure(struct rte_eth_dev *dev) } /* loading hashKey */ - memcpy(&dev_rss_conf->hashKey[0], port_rss_conf->rss_key, dev_rss_conf->hashKeySize); + memcpy(&dev_rss_conf->hashKey[0], port_rss_conf->rss_key, + dev_rss_conf->hashKeySize); /* loading indTable */ for (i = 0, j = 0; i < dev_rss_conf->indTableSize; i++, j++) {
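
The transmit-path changes above teach vmxnet3_xmit_pkts() to honour PKT_TX_TCP_SEG (TSO) and PKT_TX_TCP_CKSUM/PKT_TX_UDP_CKSUM, reading l2_len, l3_len, l4_len and tso_segsz from each mbuf. As a rough illustration of what a caller is expected to provide, the sketch below (not part of the patch) prepares a single-segment Ethernet/IPv4/TCP mbuf for TSO. The helper name prepare_tso_mbuf and the fixed Ethernet/IPv4/TCP layout are assumptions of the example, and the pseudo-header checksum step follows the generic DPDK TSO convention rather than anything vmxnet3-specific.

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_mbuf.h>

/*
 * Minimal sketch: fill in the mbuf offload fields that the TSO branch
 * of vmxnet3_xmit_pkts() reads (hedged example, not from the patch).
 */
static void
prepare_tso_mbuf(struct rte_mbuf *m, uint16_t mss)
{
	struct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
	struct ipv4_hdr *ip = (struct ipv4_hdr *)(eth + 1);
	struct tcp_hdr *tcp;

	/* Header lengths used by the driver to compute txd.hlen. */
	m->l2_len = sizeof(struct ether_hdr);
	m->l3_len = (ip->version_ihl & 0x0f) * 4;
	tcp = (struct tcp_hdr *)((char *)ip + m->l3_len);
	m->l4_len = (tcp->data_off >> 4) * 4;

	/* Segment size that the driver places in txd.msscof. */
	m->tso_segsz = mss;

	/* The patch keys its TSO branch on PKT_TX_TCP_SEG. */
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_TCP_SEG;

	/*
	 * Generic DPDK TSO convention (assumption, not vmxnet3-specific):
	 * zero the IP checksum and seed the TCP checksum with the
	 * pseudo-header checksum, excluding the payload length.
	 */
	ip->hdr_checksum = 0;
	tcp->cksum = rte_ipv4_phdr_cksum(ip, m->ol_flags);
}

After this, rte_eth_tx_burst() can hand the mbuf to a queue configured by vmxnet3_dev_tx_queue_setup(); the device performs the segmentation, while the new driver code only accounts for the resulting segments in txNumDeferred.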