static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ena_start(struct rte_eth_dev *dev);
static void ena_close(struct rte_eth_dev *dev);
-static void ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
+static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
static void ena_rx_queue_release(void *queue);
static void ena_rx_queue_release_bufs(struct ena_ring *ring);
static void ena_tx_queue_release_bufs(struct ena_ring *ring);
static int ena_link_update(struct rte_eth_dev *dev,
- __rte_unused int wait_to_complete);
+ int wait_to_complete);
static int ena_queue_restart(struct ena_ring *ring);
static int ena_queue_restart_all(struct rte_eth_dev *dev,
enum ena_ring_type ring_type);
static void ena_stats_restart(struct rte_eth_dev *dev);
-static void ena_infos_get(__rte_unused struct rte_eth_dev *dev,
+static void ena_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int ena_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
static void ena_tx_queue_release_bufs(struct ena_ring *ring)
{
- unsigned int ring_mask = ring->ring_size - 1;
+ unsigned int i;
- while (ring->next_to_clean != ring->next_to_use) {
- struct ena_tx_buffer *tx_buf =
- &ring->tx_buffer_info[ring->next_to_clean & ring_mask];
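+ /* Walk every descriptor slot, not just next_to_clean..next_to_use,
+ * so any mbuf still held in the ring is freed regardless of the
+ * state of the ring pointers.
+ */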
+ for (i = 0; i < ring->ring_size; ++i) {
+ struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i];
if (tx_buf->mbuf)
rte_pktmbuf_free(tx_buf->mbuf);
}
}
-static void ena_stats_get(struct rte_eth_dev *dev,
+static int ena_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats)
{
struct ena_admin_basic_stats ena_stats;
int rc;
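+ /* Device statistics can be read from the primary process only. */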
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return;
+ return -ENOTSUP;
memset(&ena_stats, 0, sizeof(ena_stats));
rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats);
if (unlikely(rc)) {
RTE_LOG(ERR, PMD, "Could not retrieve statistics from ENA\n");
- return;
+ return rc;
}
/* Set of basic statistics from ENA */
stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors);
stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors);
stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf);
+ return 0;
}
static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]);
/* prepare the buffer's IOVA for the DMA transaction */
- ebuf.paddr = mbuf->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+ ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
/* pass resource to device */
rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
adapter->pdev = pci_dev;
PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d",
ena_dev = &adapter->ena_dev;
ena_assert_msg(ena_dev != NULL, "Uninitialized device");
- dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->speed_capa =
ETH_LINK_SPEED_1G |
uint64_t ol_flags;
uint16_t frag_field;
- /* ENA needs partial checksum for TSO packets only, skip early */
- if (!tx_ring->adapter->tso4_supported)
- return nb_pkts;
-
for (i = 0; i != nb_pkts; i++) {
m = tx_pkts[i];
ol_flags = m->ol_flags;
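+ /* The DF/TSO checksum handling below applies to IPv4 packets only. */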
+ if (!(ol_flags & PKT_TX_IPV4))
+ continue;
+
+ /* If the L2 header length was not specified, assume it is the
+ * length of the Ethernet header.
+ */
+ if (unlikely(m->l2_len == 0))
+ m->l2_len = sizeof(struct ether_hdr);
+
+ ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+ m->l2_len);
+ frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
+
+ if ((frag_field & IPV4_HDR_DF_FLAG) != 0) {
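+ /* DF is set, so this packet will not be fragmented. */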
+ m->packet_type |= RTE_PTYPE_L4_NONFRAG;
+
+ /* If the IPv4 header has the DF flag set and TSO is not
+ * supported, the partial checksum should not be calculated.
+ */
+ if (!tx_ring->adapter->tso4_supported)
+ continue;
+ }
+
if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
(ol_flags & PKT_TX_L4_MASK) ==
PKT_TX_SCTP_CKSUM) {
}
#endif
- if (!(m->ol_flags & PKT_TX_IPV4))
- continue;
-
- ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
- m->l2_len);
- frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
- if (frag_field & IPV4_HDR_DF_FLAG)
- continue;
-
/* In case the packet is to be TSO'd and DF is not set (DF=0),
* the hardware must be provided with the partial checksum;
* otherwise it will take care of the necessary calculations.
*/
/* Process the first segment, taking into
* consideration the pushed header.
*/
if (mbuf->data_len > ena_tx_ctx.header_len) {
- ebuf->paddr = mbuf->buf_physaddr +
+ ebuf->paddr = mbuf->buf_iova +
mbuf->data_off +
ena_tx_ctx.header_len;
ebuf->len = mbuf->data_len - ena_tx_ctx.header_len;
}
while ((mbuf = mbuf->next) != NULL) {
- ebuf->paddr = mbuf->buf_physaddr + mbuf->data_off;
+ ebuf->paddr = mbuf->buf_iova + mbuf->data_off;
ebuf->len = mbuf->data_len;
ebuf++;
tx_info->num_of_bufs++;
/* Free whole mbuf chain */
mbuf = tx_info->mbuf;
rte_pktmbuf_free(mbuf);
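+ /* Clear the slot so a later queue release does not free this mbuf again. */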
+ tx_info->mbuf = NULL;
/* Put back descriptor to the ring for reuse */
tx_ring->empty_tx_reqs[next_to_clean & ring_mask] = req_id;
RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");