ENA_STAT_RX_ENTRY(cnt),
ENA_STAT_RX_ENTRY(bytes),
ENA_STAT_RX_ENTRY(refill_partial),
- ENA_STAT_RX_ENTRY(bad_csum),
+ ENA_STAT_RX_ENTRY(l3_csum_bad),
+ ENA_STAT_RX_ENTRY(l4_csum_bad),
+ ENA_STAT_RX_ENTRY(l4_csum_good),
ENA_STAT_RX_ENTRY(mbuf_alloc_fail),
ENA_STAT_RX_ENTRY(bad_desc_num),
ENA_STAT_RX_ENTRY(bad_req_id),
static struct ena_aenq_handlers aenq_handlers;
-static int ena_device_init(struct ena_com_dev *ena_dev,
+static int ena_device_init(struct ena_adapter *adapter,
struct rte_pci_device *pdev,
- struct ena_com_dev_get_features_ctx *get_feat_ctx,
- bool *wd_state);
+ struct ena_com_dev_get_features_ctx *get_feat_ctx);
static int ena_dev_configure(struct rte_eth_dev *dev);
static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
struct ena_tx_buffer *tx_info,
uint16_t queue_id);
static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev,
uint16_t queue_id);
+static int ena_configure_aenq(struct ena_adapter *adapter);
static const struct eth_dev_ops ena_dev_ops = {
.dev_configure = ena_dev_configure,
.rss_hash_conf_get = ena_rss_hash_conf_get,
};
-static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
+static inline void ena_rx_mbuf_prepare(struct ena_ring *rx_ring,
+ struct rte_mbuf *mbuf,
struct ena_com_rx_ctx *ena_rx_ctx,
bool fill_hash)
{
+ struct ena_stats_rx *rx_stats = &rx_ring->rx_stats;
uint64_t ol_flags = 0;
uint32_t packet_type = 0;
if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) {
packet_type |= RTE_PTYPE_L3_IPV4;
- if (unlikely(ena_rx_ctx->l3_csum_err))
+ if (unlikely(ena_rx_ctx->l3_csum_err)) {
+ ++rx_stats->l3_csum_bad;
ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
- else
+ } else {
ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+ }
} else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) {
packet_type |= RTE_PTYPE_L3_IPV6;
}
- if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag)
+ if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag) {
ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
- else
- if (unlikely(ena_rx_ctx->l4_csum_err))
+ } else {
+ if (unlikely(ena_rx_ctx->l4_csum_err)) {
+ ++rx_stats->l4_csum_bad;
ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
- else
+ } else {
+ ++rx_stats->l4_csum_good;
ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
+ }
+ }
if (fill_hash &&
likely((packet_type & ENA_PTYPE_HAS_HASH) && !ena_rx_ctx->frag)) {
return i;
}
-static int ena_device_init(struct ena_com_dev *ena_dev,
+static int ena_device_init(struct ena_adapter *adapter,
struct rte_pci_device *pdev,
- struct ena_com_dev_get_features_ctx *get_feat_ctx,
- bool *wd_state)
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
+ struct ena_com_dev *ena_dev = &adapter->ena_dev;
uint32_t aenq_groups;
int rc;
bool readless_supported;
BIT(ENA_ADMIN_WARNING);
aenq_groups &= get_feat_ctx->aenq.supported_groups;
- rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
- if (rc) {
- PMD_DRV_LOG(ERR, "Cannot configure AENQ groups, rc: %d\n", rc);
- goto err_admin_init;
- }
- *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
+ adapter->all_aenq_groups = aenq_groups;
return 0;
static void check_for_missing_keep_alive(struct ena_adapter *adapter)
{
- if (!adapter->wd_state)
+ if (!(adapter->active_aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)))
return;
if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
struct rte_eth_dev *dev = arg;
struct ena_adapter *adapter = dev->data->dev_private;
+ if (unlikely(adapter->trigger_reset))
+ return;
+
check_for_missing_keep_alive(adapter);
check_for_admin_com_state(adapter);
check_for_tx_completions(adapter);
int rc;
static int adapters_found;
bool disable_meta_caching;
- bool wd_state = false;
eth_dev->dev_ops = &ena_dev_ops;
eth_dev->rx_pkt_burst = ð_ena_recv_pkts;
}
/* device specific initialization routine */
- rc = ena_device_init(ena_dev, pci_dev, &get_feat_ctx, &wd_state);
+ rc = ena_device_init(adapter, pci_dev, &get_feat_ctx);
if (rc) {
PMD_INIT_LOG(CRIT, "Failed to init ENA device\n");
goto err;
}
- adapter->wd_state = wd_state;
+
+ /* Check if device supports LSC */
+ if (!(adapter->all_aenq_groups & BIT(ENA_ADMIN_LINK_CHANGE)))
+ adapter->edev_data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
set_default_llq_configurations(&llq_config, &get_feat_ctx.llq,
adapter->use_large_llq_hdr);
static int ena_dev_configure(struct rte_eth_dev *dev)
{
struct ena_adapter *adapter = dev->data->dev_private;
+ int rc;
adapter->state = ENA_ADAPTER_STATE_CONFIG;
*/
adapter->tx_cleanup_stall_delay = adapter->missing_tx_completion_to / 2;
- adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
- adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
+ rc = ena_configure_aenq(adapter);
- return 0;
+ return rc;
}
static void ena_init_rings(struct ena_adapter *adapter,
}
/* fill mbuf attributes if any */
- ena_rx_mbuf_prepare(mbuf, &ena_rx_ctx, fill_hash);
+ ena_rx_mbuf_prepare(rx_ring, mbuf, &ena_rx_ctx, fill_hash);
if (unlikely(mbuf->ol_flags &
- (RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD))) {
+ (RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD)))
rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors);
- ++rx_ring->rx_stats.bad_csum;
- }
rx_pkts[completed] = mbuf;
rx_ring->rx_stats.bytes += mbuf->pkt_len;
}
#endif
+ available_desc = ena_com_free_q_entries(tx_ring->ena_com_io_sq);
+ if (available_desc < tx_ring->tx_free_thresh)
+ ena_tx_cleanup(tx_ring);
+
for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
if (ena_xmit_mbuf(tx_ring, tx_pkts[sent_idx]))
break;
tx_ring->size_mask)]);
}
- available_desc = ena_com_free_q_entries(tx_ring->ena_com_io_sq);
- tx_ring->tx_stats.available_desc = available_desc;
-
/* If there are ready packets to be xmitted... */
if (likely(tx_ring->pkts_without_db)) {
/* ...let HW do its best :-) */
tx_ring->pkts_without_db = false;
}
- if (available_desc < tx_ring->tx_free_thresh)
- ena_tx_cleanup(tx_ring);
-
tx_ring->tx_stats.available_desc =
ena_com_free_q_entries(tx_ring->ena_com_io_sq);
tx_ring->tx_stats.tx_poll++;
return 0;
}
+/*
+ * Configure the AENQ (Asynchronous Event Notification Queue) groups on the
+ * device, honoring the application's LSC (link status change) interrupt
+ * request from dev_conf.intr_conf.lsc.
+ *
+ * Returns 0 on success, -EINVAL if LSC was requested but the device does not
+ * support the LINK_CHANGE AENQ group, or the error code returned by
+ * ena_com_set_aenq_config() on failure.
+ */
+static int ena_configure_aenq(struct ena_adapter *adapter)
+{
+ uint32_t aenq_groups = adapter->all_aenq_groups;
+ int rc;
+
+ /* all_aenq_groups holds every AENQ group supported by both the device
+ * and the driver, so first verify that an LSC request (if any) can
+ * actually be satisfied.
+ */
+ if (adapter->edev_data->dev_conf.intr_conf.lsc != 0) {
+ if (!(aenq_groups & BIT(ENA_ADMIN_LINK_CHANGE))) {
+ PMD_DRV_LOG(ERR,
+ "LSC requested, but it's not supported by the AENQ\n");
+ return -EINVAL;
+ }
+ } else {
+ /* If LSC wasn't enabled by the app, let's enable all supported
+ * AENQ procedures except the LSC.
+ */
+ aenq_groups &= ~BIT(ENA_ADMIN_LINK_CHANGE);
+ }
+
+ rc = ena_com_set_aenq_config(&adapter->ena_dev, aenq_groups);
+ if (rc != 0) {
+ PMD_DRV_LOG(ERR, "Cannot configure AENQ groups, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Remember which groups were actually enabled; e.g. the keep-alive
+ * watchdog check keys off active_aenq_groups (see
+ * check_for_missing_keep_alive()).
+ */
+ adapter->active_aenq_groups = aenq_groups;
+
+ return 0;
+}
+
/*********************************************************************
* PMD configuration
*********************************************************************/