X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fena%2Fena_ethdev.c;h=80ce1f35324875e6bc000d128bc438919a83405f;hb=a1366b1a2be368503d81ce01925198ba83705f46;hp=6367ad4a747fa5f21bd0ea53cb91fd8c40121267;hpb=a467e8f37a3eec98210c0c3ec04bf6e9506ddd81;p=dpdk.git

diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 6367ad4a74..80ce1f3532 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -213,12 +213,12 @@ static void ena_tx_queue_release(void *queue);
 static void ena_rx_queue_release_bufs(struct ena_ring *ring);
 static void ena_tx_queue_release_bufs(struct ena_ring *ring);
 static int ena_link_update(struct rte_eth_dev *dev,
-			   __rte_unused int wait_to_complete);
+			   int wait_to_complete);
 static int ena_queue_restart(struct ena_ring *ring);
 static int ena_queue_restart_all(struct rte_eth_dev *dev,
 				 enum ena_ring_type ring_type);
 static void ena_stats_restart(struct rte_eth_dev *dev);
-static void ena_infos_get(__rte_unused struct rte_eth_dev *dev,
+static void ena_infos_get(struct rte_eth_dev *dev,
 			  struct rte_eth_dev_info *dev_info);
 static int ena_rss_reta_update(struct rte_eth_dev *dev,
 			       struct rte_eth_rss_reta_entry64 *reta_conf,
@@ -689,11 +689,10 @@ static void ena_rx_queue_release_bufs(struct ena_ring *ring)
 
 static void ena_tx_queue_release_bufs(struct ena_ring *ring)
 {
-	unsigned int ring_mask = ring->ring_size - 1;
+	unsigned int i;
 
-	while (ring->next_to_clean != ring->next_to_use) {
-		struct ena_tx_buffer *tx_buf =
-			&ring->tx_buffer_info[ring->next_to_clean & ring_mask];
+	for (i = 0; i < ring->ring_size; ++i) {
+		struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i];
 
 		if (tx_buf->mbuf)
 			rte_pktmbuf_free(tx_buf->mbuf);
@@ -1173,6 +1172,8 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 		rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
 						&ebuf, next_to_use_masked);
 		if (unlikely(rc)) {
+			rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbuf),
+					     count - i);
 			RTE_LOG(WARNING, PMD, "failed adding rx desc\n");
 			break;
 		}
@@ -1287,7 +1288,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
 
-	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	adapter->pdev = pci_dev;
 
 	PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d",
@@ -1449,7 +1450,7 @@ static void ena_infos_get(struct rte_eth_dev *dev,
 	ena_dev = &adapter->ena_dev;
 	ena_assert_msg(ena_dev != NULL, "Uninitialized device");
 
-	dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 
 	dev_info->speed_capa =
 			ETH_LINK_SPEED_1G |
@@ -1576,13 +1577,13 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		recv_idx++;
 	}
 
-	desc_in_use += 1;
+	rx_ring->next_to_clean = next_to_clean;
+
+	desc_in_use = desc_in_use - completed + 1;
 	/* Burst refill to save doorbells, memory barriers, const interval */
 	if (ring_size - desc_in_use > ENA_RING_DESCS_RATIO(ring_size))
 		ena_populate_rx_queue(rx_ring, ring_size - desc_in_use);
 
-	rx_ring->next_to_clean = next_to_clean;
-
 	return recv_idx;
 }
 
@@ -1598,14 +1599,33 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint64_t ol_flags;
 	uint16_t frag_field;
 
-	/* ENA needs partial checksum for TSO packets only, skip early */
-	if (!tx_ring->adapter->tso4_supported)
-		return nb_pkts;
-
 	for (i = 0; i != nb_pkts; i++) {
 		m = tx_pkts[i];
 		ol_flags = m->ol_flags;
 
+		if (!(ol_flags & PKT_TX_IPV4))
+			continue;
+
+		/* If no L2 header length was specified, assume
+		 * it is the length of the ethernet header.
+		 */
+		if (unlikely(m->l2_len == 0))
+			m->l2_len = sizeof(struct ether_hdr);
+
+		ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+						 m->l2_len);
+		frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
+
+		if ((frag_field & IPV4_HDR_DF_FLAG) != 0) {
+			m->packet_type |= RTE_PTYPE_L4_NONFRAG;
+
+			/* If the IPv4 header has the DF flag enabled and TSO
+			 * support is disabled, the partial checksum should
+			 * not be calculated.
+			 */
+			if (!tx_ring->adapter->tso4_supported)
+				continue;
+		}
+
 		if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
 				(ol_flags & PKT_TX_L4_MASK) == PKT_TX_SCTP_CKSUM) {
@@ -1621,15 +1641,6 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		}
 #endif
 
-		if (!(m->ol_flags & PKT_TX_IPV4))
-			continue;
-
-		ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
-						 m->l2_len);
-		frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
-		if (frag_field & IPV4_HDR_DF_FLAG)
-			continue;
-
 		/* In case we are supposed to TSO and have DF not set (DF=0)
 		 * hardware must be provided with partial checksum, otherwise
 		 * it will take care of necessary calculations.
@@ -1760,6 +1771,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		/* Free whole mbuf chain */
 		mbuf = tx_info->mbuf;
 		rte_pktmbuf_free(mbuf);
+		tx_info->mbuf = NULL;
 
 		/* Put back descriptor to the ring for reuse */
 		tx_ring->empty_tx_reqs[next_to_clean & ring_mask] = req_id;
@@ -1800,4 +1812,4 @@ static struct rte_pci_driver rte_ena_pmd = {
 
 RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
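---
For reference, a minimal standalone sketch (not part of the patch) of the DF-bit check that the @@ -1598,14 +1599,33 @@ hunk adds to eth_ena_prep_pkts(): locate the IPv4 header via l2_len (falling back to a plain Ethernet header when l2_len is unset), byte-swap fragment_offset, and test the DF flag. The helper name pkt_has_df_flag() is hypothetical; the types and accessors (struct ipv4_hdr, struct ether_hdr, rte_pktmbuf_mtod_offset(), rte_be_to_cpu_16(), IPV4_HDR_DF_FLAG) are the DPDK APIs used by this file.

	#include <stdbool.h>
	#include <rte_branch_prediction.h>
	#include <rte_byteorder.h>
	#include <rte_ether.h>
	#include <rte_ip.h>
	#include <rte_mbuf.h>

	/* Hypothetical helper, for illustration only: returns true when the
	 * packet's IPv4 header has the Don't Fragment bit set.
	 */
	static bool
	pkt_has_df_flag(struct rte_mbuf *m)
	{
		struct ipv4_hdr *ip_hdr;
		uint16_t frag_field;

		/* Fall back to a plain Ethernet header when l2_len is unset,
		 * as the patched prepare path does before locating the
		 * IPv4 header.
		 */
		if (unlikely(m->l2_len == 0))
			m->l2_len = sizeof(struct ether_hdr);

		/* fragment_offset is big-endian on the wire; the DF bit
		 * lives in its flag bits, exposed via IPV4_HDR_DF_FLAG.
		 */
		ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
						 m->l2_len);
		frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);

		return (frag_field & IPV4_HDR_DF_FLAG) != 0;
	}

In the patched path, a DF-set packet is additionally marked RTE_PTYPE_L4_NONFRAG, and checksum preparation is skipped only when TSO is unsupported; with DF=0 the partial checksum is still prepared so the hardware can complete the calculation, per the existing comment in the file.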