X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_pmd_i40e%2Fi40e_rxtx.c;h=25a5f6f9371085fa87c20ccc7ee34ebc3fa71625;hb=08b563ffb19d8baf59dd84200f25bc85031d18a7;hp=3a6a2d8106a9ce9ddede688f319e3561863fc638;hpb=d025265b7e652f9165cb509e23f380ed896721b1;p=dpdk.git

diff --git a/lib/librte_pmd_i40e/i40e_rxtx.c b/lib/librte_pmd_i40e/i40e_rxtx.c
index 3a6a2d8106..25a5f6f937 100644
--- a/lib/librte_pmd_i40e/i40e_rxtx.c
+++ b/lib/librte_pmd_i40e/i40e_rxtx.c
@@ -78,9 +78,7 @@
         (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
 
 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
-        ((uint64_t)((mb)->buf_physaddr + \
-        (uint64_t)((char *)((mb)->pkt.data) - \
-        (char *)(mb)->buf_addr)))
+        ((uint64_t)((mb)->buf_physaddr + (mb)->data_off))
 
 static const struct rte_memzone *
 i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
@@ -88,9 +86,6 @@ i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
                            uint16_t queue_id,
                            uint32_t ring_size,
                            int socket_id);
-static void i40e_reset_rx_queue(struct i40e_rx_queue *rxq);
-static void i40e_reset_tx_queue(struct i40e_tx_queue *txq);
-static void i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq);
 static uint16_t i40e_xmit_pkts_simple(void *tx_queue,
                                       struct rte_mbuf **tx_pkts,
                                       uint16_t nb_pkts);
@@ -473,7 +468,7 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)
         struct rte_mbuf *m;
 
         m = __rte_mbuf_raw_alloc(mp);
-        __rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0);
+        __rte_mbuf_sanity_check_raw(m, 0);
 
         return m;
 }
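Note: the RTE_MBUF_DATA_DMA_ADDR hunk above replaces per-mbuf pointer arithmetic with a stored offset. A minimal standalone sketch of the before/after address math follows; it is not part of the patch, and the old_mbuf/new_mbuf stand-in structs and the two helper names are illustrative only, not DPDK API.

#include <stdint.h>

/* Illustrative stand-ins for the pre- and post-rework mbuf layouts. */
struct old_mbuf { void *buf_addr; uint64_t buf_physaddr; struct { void *data; } pkt; };
struct new_mbuf { void *buf_addr; uint64_t buf_physaddr; uint16_t data_off; };

/* Old scheme: recover the data offset by subtracting two pointers. */
static uint64_t old_dma_addr(const struct old_mbuf *mb)
{
        return mb->buf_physaddr +
                (uint64_t)((char *)mb->pkt.data - (char *)mb->buf_addr);
}

/* New scheme: the offset is stored directly in the mbuf. */
static uint64_t new_dma_addr(const struct new_mbuf *mb)
{
        return mb->buf_physaddr + mb->data_off;
}

Both helpers return buf_physaddr plus the offset of the packet data inside the buffer; the new form needs no pointer subtraction, which is the point of the macro change.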
@@ -614,9 +609,9 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
                                         I40E_RXD_QW1_STATUS_SHIFT;
                         pkt_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
                                 I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
-                        mb->pkt.data_len = pkt_len;
-                        mb->pkt.pkt_len = pkt_len;
-                        mb->pkt.vlan_macip.f.vlan_tci = rx_status &
+                        mb->data_len = pkt_len;
+                        mb->pkt_len = pkt_len;
+                        mb->vlan_tci = rx_status &
                                 (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
                         rte_le_to_cpu_16(\
                                 rxdp[j].wb.qword0.lo_dword.l2tag1) : 0;
@@ -625,7 +620,7 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
                                 pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
                         mb->ol_flags = pkt_flags;
                         if (pkt_flags & PKT_RX_RSS_HASH)
-                                mb->pkt.hash.rss = rte_le_to_cpu_32(\
+                                mb->hash.rss = rte_le_to_cpu_32(\
                                         rxdp->wb.qword0.hi_dword.rss);
                 }
@@ -687,11 +682,10 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
         for (i = 0; i < rxq->rx_free_thresh; i++) {
                 mb = rxep[i].mbuf;
                 rte_mbuf_refcnt_set(mb, 1);
-                mb->type = RTE_MBUF_PKT;
-                mb->pkt.next = NULL;
-                mb->pkt.data = (char *)mb->buf_addr + RTE_PKTMBUF_HEADROOM;
-                mb->pkt.nb_segs = 1;
-                mb->pkt.in_port = rxq->port_id;
+                mb->next = NULL;
+                mb->data_off = RTE_PKTMBUF_HEADROOM;
+                mb->nb_segs = 1;
+                mb->port = rxq->port_id;
                 dma_addr = rte_cpu_to_le_64(\
                         RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
                 rxdp[i].read.hdr_addr = dma_addr;
@@ -846,15 +840,15 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                 rx_packet_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
                                 I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
-                rxm->pkt.data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
-                rte_prefetch0(rxm->pkt.data);
-                rxm->pkt.nb_segs = 1;
-                rxm->pkt.next = NULL;
-                rxm->pkt.pkt_len = rx_packet_len;
-                rxm->pkt.data_len = rx_packet_len;
-                rxm->pkt.in_port = rxq->port_id;
+                rxm->data_off = RTE_PKTMBUF_HEADROOM;
+                rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
+                rxm->nb_segs = 1;
+                rxm->next = NULL;
+                rxm->pkt_len = rx_packet_len;
+                rxm->data_len = rx_packet_len;
+                rxm->port = rxq->port_id;
 
-                rxm->pkt.vlan_macip.f.vlan_tci = rx_status &
+                rxm->vlan_tci = rx_status &
                         (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
                         rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0;
                 pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
@@ -862,7 +856,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                         pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
                 rxm->ol_flags = pkt_flags;
                 if (pkt_flags & PKT_RX_RSS_HASH)
-                        rxm->pkt.hash.rss =
+                        rxm->hash.rss =
                                 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
 
                 rx_pkts[nb_rx++] = rxm;
@@ -949,8 +943,8 @@ i40e_recv_scattered_pkts(void *rx_queue,
                 rxdp->read.pkt_addr = dma_addr;
                 rx_packet_len = (qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
                                         I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
-                rxm->pkt.data_len = rx_packet_len;
-                rxm->pkt.data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+                rxm->data_len = rx_packet_len;
+                rxm->data_off = RTE_PKTMBUF_HEADROOM;
 
                 /**
                  * If this is the first buffer of the received packet, set the
@@ -961,14 +955,14 @@ i40e_recv_scattered_pkts(void *rx_queue,
                  */
                 if (!first_seg) {
                         first_seg = rxm;
-                        first_seg->pkt.nb_segs = 1;
-                        first_seg->pkt.pkt_len = rx_packet_len;
+                        first_seg->nb_segs = 1;
+                        first_seg->pkt_len = rx_packet_len;
                 } else {
-                        first_seg->pkt.pkt_len =
-                                (uint16_t)(first_seg->pkt.pkt_len +
+                        first_seg->pkt_len =
+                                (uint16_t)(first_seg->pkt_len +
                                                 rx_packet_len);
-                        first_seg->pkt.nb_segs++;
-                        last_seg->pkt.next = rxm;
+                        first_seg->nb_segs++;
+                        last_seg->next = rxm;
                 }
 
                 /**
@@ -991,23 +985,23 @@ i40e_recv_scattered_pkts(void *rx_queue,
                  * the length of that CRC part from the data length of the
                  * previous mbuf.
                  */
-                rxm->pkt.next = NULL;
+                rxm->next = NULL;
                 if (unlikely(rxq->crc_len > 0)) {
-                        first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
+                        first_seg->pkt_len -= ETHER_CRC_LEN;
                         if (rx_packet_len <= ETHER_CRC_LEN) {
                                 rte_pktmbuf_free_seg(rxm);
-                                first_seg->pkt.nb_segs--;
-                                last_seg->pkt.data_len =
-                                        (uint16_t)(last_seg->pkt.data_len -
+                                first_seg->nb_segs--;
+                                last_seg->data_len =
+                                        (uint16_t)(last_seg->data_len -
                                                 (ETHER_CRC_LEN - rx_packet_len));
-                                last_seg->pkt.next = NULL;
+                                last_seg->next = NULL;
                         } else
-                                rxm->pkt.data_len = (uint16_t)(rx_packet_len -
+                                rxm->data_len = (uint16_t)(rx_packet_len -
                                                         ETHER_CRC_LEN);
                 }
 
-                first_seg->pkt.in_port = rxq->port_id;
-                first_seg->pkt.vlan_macip.f.vlan_tci = (rx_status &
+                first_seg->port = rxq->port_id;
+                first_seg->vlan_tci = (rx_status &
                         (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
                         rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0;
                 pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
@@ -1015,11 +1009,12 @@ i40e_recv_scattered_pkts(void *rx_queue,
                         pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
                 first_seg->ol_flags = pkt_flags;
                 if (pkt_flags & PKT_RX_RSS_HASH)
-                        rxm->pkt.hash.rss =
+                        rxm->hash.rss =
                                 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
 
                 /* Prefetch data of first segment, if configured to do so. */
-                rte_prefetch0(first_seg->pkt.data);
+                rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
+                        first_seg->data_off));
                 rx_pkts[nb_rx++] = first_seg;
                 first_seg = NULL;
         }
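Note: the scattered-RX hunks above maintain an invariant on the segment chain with the new flat field names: the head mbuf's pkt_len equals the sum of data_len over all segments linked by next, and nb_segs counts the links (the CRC-trimming branch preserves this when it frees a CRC-only tail segment). A small self-contained checker over stand-in types follows; struct seg and check_chain are illustrative only, not part of the patch or of DPDK.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct seg {
        uint16_t data_len;   /* bytes in this segment */
        uint32_t pkt_len;    /* meaningful in the head segment only */
        uint16_t nb_segs;    /* meaningful in the head segment only */
        struct seg *next;    /* next segment, NULL on the last one */
};

static void check_chain(const struct seg *head)
{
        uint32_t total = 0;
        uint16_t count = 0;
        const struct seg *s;

        /* Walk the chain and re-derive the head's totals. */
        for (s = head; s != NULL; s = s->next) {
                total += s->data_len;
                count++;
        }
        assert(total == head->pkt_len);
        assert(count == head->nb_segs);
}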
@@ -1109,8 +1104,8 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                         RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
                 ol_flags = tx_pkt->ol_flags;
-                l2_len = tx_pkt->pkt.vlan_macip.f.l2_len;
-                l3_len = tx_pkt->pkt.vlan_macip.f.l3_len;
+                l2_len = tx_pkt->l2_len;
+                l3_len = tx_pkt->l3_len;
 
                 /* Calculate the number of context descriptors needed. */
                 nb_ctx = i40e_calc_context_desc(ol_flags);
@@ -1120,7 +1115,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                  * a packet equals to the number of the segments of that
                  * packet plus 1 context descriptor if needed.
                  */
-                nb_used = (uint16_t)(tx_pkt->pkt.nb_segs + nb_ctx);
+                nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
                 tx_last = (uint16_t)(tx_id + nb_used - 1);
 
                 /* Circular ring */
@@ -1146,8 +1141,8 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
                 /* Descriptor based VLAN insertion */
                 if (ol_flags & PKT_TX_VLAN_PKT) {
-                        tx_flags |= tx_pkt->pkt.vlan_macip.f.vlan_tci <<
-                                I40E_TX_FLAG_L2TAG1_SHIFT;
+                        tx_flags |= tx_pkt->vlan_tci <<
+                                I40E_TX_FLAG_L2TAG1_SHIFT;
                         tx_flags |= I40E_TX_FLAG_INSERT_VLAN;
                         td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
                         td_tag = (tx_flags & I40E_TX_FLAG_L2TAG1_MASK) >>
@@ -1203,7 +1198,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                         txe->mbuf = m_seg;
 
                         /* Setup TX Descriptor */
-                        slen = m_seg->pkt.data_len;
+                        slen = m_seg->data_len;
                         buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
                         txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
                         txd->cmd_type_offset_bsz = i40e_build_ctob(td_cmd,
@@ -1211,7 +1206,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                         txe->last_id = tx_last;
                         tx_id = txe->next_id;
                         txe = txn;
-                        m_seg = m_seg->pkt.next;
+                        m_seg = m_seg->next;
                 } while (m_seg != NULL);
 
                 /* The last packet data descriptor needs End Of Packet (EOP) */
@@ -1299,7 +1294,7 @@ tx4(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
                 txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
                 txdp->cmd_type_offset_bsz =
                         i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
-                                        (*pkts)->pkt.data_len, 0);
+                                        (*pkts)->data_len, 0);
         }
 }
@@ -1313,7 +1308,7 @@ tx1(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
         txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
         txdp->cmd_type_offset_bsz =
                 i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
-                                (*pkts)->pkt.data_len, 0);
+                                (*pkts)->data_len, 0);
 }
 
 /* Fill hardware descriptor ring with mbuf data */
@@ -1428,6 +1423,118 @@ i40e_xmit_pkts_simple(void *tx_queue,
         return nb_tx;
 }
 
+int
+i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+        struct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_VSI(dev->data->dev_private);
+        struct i40e_rx_queue *rxq;
+        int err = -1;
+        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+        uint16_t q_base = vsi->base_queue;
+
+        PMD_INIT_FUNC_TRACE();
+
+        if (rx_queue_id < dev->data->nb_rx_queues) {
+                rxq = dev->data->rx_queues[rx_queue_id];
+
+                err = i40e_alloc_rx_queue_mbufs(rxq);
+                if (err) {
+                        PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf\n");
+                        return err;
+                }
+
+                rte_wmb();
+
+                /* Init the RX tail register. */
+                I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+
+                err = i40e_switch_rx_queue(hw, rx_queue_id + q_base, TRUE);
+
+                if (err) {
+                        PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on\n",
+                                rx_queue_id);
+
+                        i40e_rx_queue_release_mbufs(rxq);
+                        i40e_reset_rx_queue(rxq);
+                }
+        }
+
+        return err;
+}
+
+int
+i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+        struct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_VSI(dev->data->dev_private);
+        struct i40e_rx_queue *rxq;
+        int err;
+        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+        uint16_t q_base = vsi->base_queue;
+
+        if (rx_queue_id < dev->data->nb_rx_queues) {
+                rxq = dev->data->rx_queues[rx_queue_id];
+
+                err = i40e_switch_rx_queue(hw, rx_queue_id + q_base, FALSE);
+
+                if (err) {
+                        PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off\n",
+                                rx_queue_id);
+                        return err;
+                }
+                i40e_rx_queue_release_mbufs(rxq);
+                i40e_reset_rx_queue(rxq);
+        }
+
+        return 0;
+}
+
+int
+i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+        struct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_VSI(dev->data->dev_private);
+        int err = -1;
+        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+        uint16_t q_base = vsi->base_queue;
+
+        PMD_INIT_FUNC_TRACE();
+
+        if (tx_queue_id < dev->data->nb_tx_queues) {
+                err = i40e_switch_tx_queue(hw, tx_queue_id + q_base, TRUE);
+                if (err)
+                        PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on\n",
+                                tx_queue_id);
+        }
+
+        return err;
+}
+
+int
+i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+        struct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_VSI(dev->data->dev_private);
+        struct i40e_tx_queue *txq;
+        int err;
+        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+        uint16_t q_base = vsi->base_queue;
+
+        if (tx_queue_id < dev->data->nb_tx_queues) {
+                txq = dev->data->tx_queues[tx_queue_id];
+
+                err = i40e_switch_tx_queue(hw, tx_queue_id + q_base, FALSE);
+
+                if (err) {
+                        PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off\n",
+                                tx_queue_id);
+                        return err;
+                }
+
+                i40e_tx_queue_release_mbufs(txq);
+                i40e_reset_tx_queue(txq);
+        }
+
+        return 0;
+}
+
 int
 i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
                         uint16_t queue_idx,
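Note: the four functions added above back the ethdev per-queue start/stop hooks. From an application, the intended flow is roughly the following sketch against the DPDK API of this era; start_rx_per_q is the deferred-start flag this series introduces in rte_eth_rxconf (later renamed rx_deferred_start), and the descriptor count and function name here are arbitrary choices for illustration, not taken from the patch.

#include <string.h>
#include <rte_ethdev.h>

static int rx_queue_deferred_start(uint8_t port_id, uint16_t queue_id,
                                   struct rte_mempool *mp)
{
        struct rte_eth_rxconf rx_conf;

        memset(&rx_conf, 0, sizeof(rx_conf));
        rx_conf.start_rx_per_q = 1; /* skip this queue in rte_eth_dev_start() */

        if (rte_eth_rx_queue_setup(port_id, queue_id, 128,
                                   rte_eth_dev_socket_id(port_id),
                                   &rx_conf, mp) != 0)
                return -1;

        if (rte_eth_dev_start(port_id) != 0)
                return -1;

        /* Later, once the queue should begin receiving: on i40e this lands
         * in i40e_dev_rx_queue_start(), which allocates the ring mbufs,
         * arms the tail register and switches the queue on. */
        return rte_eth_dev_rx_queue_start(port_id, queue_id);
}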
@@ -1482,6 +1589,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
                                                 0 : ETHER_CRC_LEN);
         rxq->drop_en = rx_conf->rx_drop_en;
         rxq->vsi = vsi;
+        rxq->start_rx_per_q = rx_conf->start_rx_per_q;
 
         /* Allocate the maximun number of RX ring hardware descriptor. */
         ring_size = sizeof(union i40e_rx_desc) * I40E_MAX_RING_DESC;
@@ -1767,6 +1875,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
         txq->port_id = dev->data->port_id;
         txq->txq_flags = tx_conf->txq_flags;
         txq->vsi = vsi;
+        txq->start_tx_per_q = tx_conf->start_tx_per_q;
 
 #ifdef RTE_LIBRTE_XEN_DOM0
         txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
@@ -1829,7 +1938,7 @@ i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
         char z_name[RTE_MEMZONE_NAMESIZE];
         const struct rte_memzone *mz;
 
-        rte_snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+        snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
                         dev->driver->pci_drv.name, ring_name,
                                 dev->data->port_id, queue_id);
         mz = rte_memzone_lookup(z_name);
@@ -1874,7 +1983,7 @@ i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq)
 #endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
 }
 
-static void
+void
 i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
 {
         unsigned i;
@@ -1905,7 +2014,7 @@ i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
         rxq->pkt_last_seg = NULL;
 }
 
-static void
+void
 i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
 {
         uint16_t i;
@@ -1923,7 +2032,7 @@ i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
         }
 }
 
-static void
+void
 i40e_reset_tx_queue(struct i40e_tx_queue *txq)
 {
         struct i40e_tx_entry *txe;
@@ -2020,11 +2129,10 @@ i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq)
                 }
 
                 rte_mbuf_refcnt_set(mbuf, 1);
-                mbuf->type = RTE_MBUF_PKT;
-                mbuf->pkt.next = NULL;
-                mbuf->pkt.data = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
-                mbuf->pkt.nb_segs = 1;
-                mbuf->pkt.in_port = rxq->port_id;
+                mbuf->next = NULL;
+                mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+                mbuf->nb_segs = 1;
+                mbuf->port = rxq->port_id;
 
                 dma_addr =
                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
@@ -2161,7 +2269,7 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
         }
         rxq->qrx_tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
 
-        err = i40e_alloc_rx_queue_mbufs(rxq);
+
         mbp_priv = rte_mempool_get_priv(rxq->mp);
         buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
                                         RTE_PKTMBUF_HEADROOM);
@@ -2172,16 +2280,10 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
                 dev->rx_pkt_burst = i40e_recv_scattered_pkts;
         }
 
-        rte_wmb();
-
-        /* Init the RX tail regieter. */
-        I40E_PCI_REG_WRITE(rxq->qrx_tail, 0);
-        I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
-
-        if (err)
-                PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf\n");
-
-        return err;
+        return 0;
 }
 
 void
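Note: the rte_snprintf() to snprintf() change above follows the deprecation of DPDK's private snprintf wrapper in favor of the libc function; the formatting behavior is unchanged. For illustration, the memzone name that i40e_ring_dma_zone_reserve() builds would look like the following; the driver and ring names and the queue numbers here are made-up example values, not taken from the patch.

#include <stdio.h>

int main(void)
{
        char z_name[32]; /* RTE_MEMZONE_NAMESIZE */

        /* Same format string as the driver: <drv>_<ring>_<port>_<queue>. */
        snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
                 "rte_i40e_pmd", "rx_ring", 0, 3);
        printf("%s\n", z_name); /* prints: rte_i40e_pmd_rx_ring_0_3 */
        return 0;
}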