#include <rte_errno.h>
#include <rte_version.h>
#include <rte_eal_memconfig.h>
+#include <rte_net.h>
#include "ena_ethdev.h"
#include "ena_logs.h"
#define PCI_DEVICE_ID_ENA_VF 0xEC20
#define PCI_DEVICE_ID_ENA_LLQ_VF 0xEC21
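+
+/*
+ * Tx offload flags the driver supports; ENA_TX_OFFLOAD_NOTSUP_MASK is the
+ * complement within PKT_TX_OFFLOAD_MASK and is used by Tx prepare to
+ * reject mbufs requesting unsupported offloads.
+ */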
+#define ENA_TX_OFFLOAD_MASK (\
+ PKT_TX_L4_MASK | \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_TCP_SEG)
+
+#define ENA_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)
+
static struct rte_pci_id pci_id_ena_map[] = {
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF) },
static int ena_dev_configure(struct rte_eth_dev *dev);
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
eth_dev->dev_ops = &ena_dev_ops;
eth_dev->rx_pkt_burst = &eth_ena_recv_pkts;
eth_dev->tx_pkt_burst = &eth_ena_xmit_pkts;
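+ /* Tx prepare hook, reached through rte_eth_tx_prepare() ahead of the Tx burst. */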
+ eth_dev->tx_pkt_prepare = &eth_ena_prep_pkts;
adapter->rte_eth_dev_data = eth_dev->data;
adapter->rte_dev = eth_dev;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- pci_dev = eth_dev->pci_dev;
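+ /* ethdev no longer embeds a PCI handle; fetch it from the generic rte_device. */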
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
adapter->pdev = pci_dev;
PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d\n",
ena_dev = &adapter->ena_dev;
ena_assert_msg(ena_dev != NULL, "Uninitialized device");
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+
dev_info->speed_capa =
ETH_LINK_SPEED_1G |
ETH_LINK_SPEED_2_5G |
return recv_idx;
}
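+
+/*
+ * Tx prepare callback: validate per-mbuf offload flags and fill in the
+ * pseudo-header checksum where the hardware checksum offload needs it.
+ * Returns the number of packets ready for transmission; on failure,
+ * rte_errno is set and the index of the offending packet is returned.
+ */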
+static uint16_t
+eth_ena_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int32_t ret;
+ uint32_t i;
+ struct rte_mbuf *m;
+ uint64_t ol_flags;
+
+ for (i = 0; i != nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
+ (ol_flags & PKT_TX_L4_MASK) ==
+ PKT_TX_SCTP_CKSUM) {
+ /* rte_errno holds positive errno values */
+ rte_errno = ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+#endif
+ /* ENA doesn't need a different pseudo-header cksum for TSO */
+ ret = rte_net_intel_cksum_flags_prepare(m,
+ ol_flags & ~PKT_TX_TCP_SEG);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+ }
+
+ return i;
+}
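+
+/*
+ * Sketch of the application-side usage (port_id, queue_id, pkts and
+ * nb_pkts are placeholders, not part of this patch):
+ *
+ *	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
+ *	if (nb_prep != nb_pkts)
+ *		printf("prepare failed: %s\n", rte_strerror(rte_errno));
+ *	nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
+ */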
+
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
struct ena_tx_buffer *tx_info;
struct ena_com_buf *ebuf;
uint16_t rc, req_id, total_tx_descs = 0;
- uint16_t sent_idx = 0;
+ uint16_t sent_idx = 0, empty_tx_reqs;
int nb_hw_desc;
/* Check adapter state */
return 0;
}
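+ /* Clamp the burst to the number of free descriptors in the Tx ring. */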
+ empty_tx_reqs = ring_size - (next_to_use - next_to_clean);
+ if (nb_pkts > empty_tx_reqs)
+ nb_pkts = empty_tx_reqs;
+
for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
mbuf = tx_pkts[sent_idx];
RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio");