X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fena%2Fena_ethdev.c;h=ed3dd162ba58595a587b61926ac6b5705a299fd3;hb=67216c31e43d1f09bc02493465d578292f2d9b7a;hp=db2b5ec8e77b741e87eeaa00f827b99d0bcd3598;hpb=06c047b680615678bb8773faaec53156ade94770;p=dpdk.git

diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index db2b5ec8e7..ed3dd162ba 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -38,11 +38,6 @@
 
 #define ENA_PTYPE_HAS_HASH	(RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP)
 
-enum ethtool_stringset {
-	ETH_SS_TEST = 0,
-	ETH_SS_STATS,
-};
-
 struct ena_stats {
 	char name[ETH_GSTRING_LEN];
 	int stat_offset;
@@ -93,8 +88,6 @@ static const struct ena_stats ena_stats_tx_strings[] = {
 	ENA_STAT_TX_ENTRY(cnt),
 	ENA_STAT_TX_ENTRY(bytes),
 	ENA_STAT_TX_ENTRY(prepare_ctx_err),
-	ENA_STAT_TX_ENTRY(linearize),
-	ENA_STAT_TX_ENTRY(linearize_failed),
 	ENA_STAT_TX_ENTRY(tx_poll),
 	ENA_STAT_TX_ENTRY(doorbells),
 	ENA_STAT_TX_ENTRY(bad_req_id),
@@ -106,7 +99,9 @@ static const struct ena_stats ena_stats_rx_strings[] = {
 	ENA_STAT_RX_ENTRY(cnt),
 	ENA_STAT_RX_ENTRY(bytes),
 	ENA_STAT_RX_ENTRY(refill_partial),
-	ENA_STAT_RX_ENTRY(bad_csum),
+	ENA_STAT_RX_ENTRY(l3_csum_bad),
+	ENA_STAT_RX_ENTRY(l4_csum_bad),
+	ENA_STAT_RX_ENTRY(l4_csum_good),
 	ENA_STAT_RX_ENTRY(mbuf_alloc_fail),
 	ENA_STAT_RX_ENTRY(bad_desc_num),
 	ENA_STAT_RX_ENTRY(bad_req_id),
@@ -165,10 +160,9 @@ static const struct rte_pci_id pci_id_ena_map[] = {
 
 static struct ena_aenq_handlers aenq_handlers;
 
-static int ena_device_init(struct ena_com_dev *ena_dev,
+static int ena_device_init(struct ena_adapter *adapter,
 			   struct rte_pci_device *pdev,
-			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
-			   bool *wd_state);
+			   struct ena_com_dev_get_features_ctx *get_feat_ctx);
 static int ena_dev_configure(struct rte_eth_dev *dev);
 static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
 	struct ena_tx_buffer *tx_info,
@@ -254,6 +248,7 @@ static int ena_rx_queue_intr_enable(struct rte_eth_dev *dev,
 				    uint16_t queue_id);
 static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev,
 				     uint16_t queue_id);
+static int ena_configure_aenq(struct ena_adapter *adapter);
 
 static const struct eth_dev_ops ena_dev_ops = {
 	.dev_configure = ena_dev_configure,
@@ -280,10 +275,12 @@ static const struct eth_dev_ops ena_dev_ops = {
 	.rss_hash_conf_get = ena_rss_hash_conf_get,
 };
 
-static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
+static inline void ena_rx_mbuf_prepare(struct ena_ring *rx_ring,
+				       struct rte_mbuf *mbuf,
 				       struct ena_com_rx_ctx *ena_rx_ctx,
 				       bool fill_hash)
 {
+	struct ena_stats_rx *rx_stats = &rx_ring->rx_stats;
 	uint64_t ol_flags = 0;
 	uint32_t packet_type = 0;
 
@@ -294,21 +291,27 @@ static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
 
 	if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) {
 		packet_type |= RTE_PTYPE_L3_IPV4;
-		if (unlikely(ena_rx_ctx->l3_csum_err))
+		if (unlikely(ena_rx_ctx->l3_csum_err)) {
+			++rx_stats->l3_csum_bad;
 			ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
-		else
+		} else {
 			ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+		}
 	} else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) {
 		packet_type |= RTE_PTYPE_L3_IPV6;
 	}
 
-	if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag)
+	if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag) {
 		ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
-	else
-		if (unlikely(ena_rx_ctx->l4_csum_err))
+	} else {
+		if (unlikely(ena_rx_ctx->l4_csum_err)) {
+			++rx_stats->l4_csum_bad;
 			ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
-		else
+		} else {
+			++rx_stats->l4_csum_good;
 			ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
+		}
+	}
 
 	if (fill_hash &&
 	    likely((packet_type & ENA_PTYPE_HAS_HASH) && !ena_rx_ctx->frag)) {
@@ -1413,11 +1416,11 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 	return i;
 }
 
-static int ena_device_init(struct ena_com_dev *ena_dev,
+static int ena_device_init(struct ena_adapter *adapter,
 			   struct rte_pci_device *pdev,
-			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
-			   bool *wd_state)
+			   struct ena_com_dev_get_features_ctx *get_feat_ctx)
 {
+	struct ena_com_dev *ena_dev = &adapter->ena_dev;
 	uint32_t aenq_groups;
 	int rc;
 	bool readless_supported;
@@ -1482,13 +1485,8 @@ static int ena_device_init(struct ena_com_dev *ena_dev,
 		      BIT(ENA_ADMIN_WARNING);
 
 	aenq_groups &= get_feat_ctx->aenq.supported_groups;
-	rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
-	if (rc) {
-		PMD_DRV_LOG(ERR, "Cannot configure AENQ groups, rc: %d\n", rc);
-		goto err_admin_init;
-	}
 
-	*wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
+	adapter->all_aenq_groups = aenq_groups;
 
 	return 0;
 
@@ -1514,7 +1512,7 @@ static void ena_interrupt_handler_rte(void *cb_arg)
 
 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
 {
-	if (!adapter->wd_state)
+	if (!(adapter->active_aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)))
 		return;
 
 	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
@@ -1626,6 +1624,9 @@ static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
 	struct rte_eth_dev *dev = arg;
 	struct ena_adapter *adapter = dev->data->dev_private;
 
+	if (unlikely(adapter->trigger_reset))
+		return;
+
 	check_for_missing_keep_alive(adapter);
 	check_for_admin_com_state(adapter);
 	check_for_tx_completions(adapter);
@@ -1795,7 +1796,6 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 	int rc;
 	static int adapters_found;
 	bool disable_meta_caching;
-	bool wd_state = false;
 
 	eth_dev->dev_ops = &ena_dev_ops;
 	eth_dev->rx_pkt_burst = &eth_ena_recv_pkts;
@@ -1847,12 +1847,15 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 	}
 
 	/* device specific initialization routine */
-	rc = ena_device_init(ena_dev, pci_dev, &get_feat_ctx, &wd_state);
+	rc = ena_device_init(adapter, pci_dev, &get_feat_ctx);
 	if (rc) {
 		PMD_INIT_LOG(CRIT, "Failed to init ENA device\n");
 		goto err;
 	}
-	adapter->wd_state = wd_state;
+
+	/* Check if device supports LSC */
+	if (!(adapter->all_aenq_groups & BIT(ENA_ADMIN_LINK_CHANGE)))
+		adapter->edev_data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
 
 	set_default_llq_configurations(&llq_config, &get_feat_ctx.llq,
 		adapter->use_large_llq_hdr);
@@ -1996,6 +1999,7 @@ static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev)
 static int ena_dev_configure(struct rte_eth_dev *dev)
 {
 	struct ena_adapter *adapter = dev->data->dev_private;
+	int rc;
 
 	adapter->state = ENA_ADAPTER_STATE_CONFIG;
 
@@ -2022,10 +2026,9 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 	 */
 	adapter->tx_cleanup_stall_delay = adapter->missing_tx_completion_to / 2;
 
-	adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
-	adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
+	rc = ena_configure_aenq(adapter);
 
-	return 0;
+	return rc;
 }
 
 static void ena_init_rings(struct ena_adapter *adapter,
@@ -2346,13 +2349,11 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		}
 
 		/* fill mbuf attributes if any */
-		ena_rx_mbuf_prepare(mbuf, &ena_rx_ctx, fill_hash);
+		ena_rx_mbuf_prepare(rx_ring, mbuf, &ena_rx_ctx, fill_hash);
 
 		if (unlikely(mbuf->ol_flags &
-				(RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD))) {
+				(RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD)))
 			rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors);
-			++rx_ring->rx_stats.bad_csum;
-		}
 
 		rx_pkts[completed] = mbuf;
 		rx_ring->rx_stats.bytes += mbuf->pkt_len;
@@ -2408,6 +2409,17 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			return i;
 		}
 
+		if (unlikely(m->nb_segs >= tx_ring->sgl_size &&
+		    !(tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
+		      m->nb_segs == tx_ring->sgl_size &&
+		      m->data_len < tx_ring->tx_max_header_size))) {
+			PMD_TX_LOG(DEBUG,
+				"mbuf[%" PRIu32 "] has too many segments: %" PRIu16 "\n",
+				i, m->nb_segs);
+			rte_errno = EINVAL;
+			return i;
+		}
+
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
 		/* Check if requested offload is also enabled for the queue */
 		if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM &&
@@ -2554,56 +2566,6 @@ static void ena_update_hints(struct ena_adapter *adapter,
 	}
 }
 
-static int ena_check_space_and_linearize_mbuf(struct ena_ring *tx_ring,
-	struct rte_mbuf *mbuf)
-{
-	struct ena_com_dev *ena_dev;
-	int num_segments, header_len, rc;
-
-	ena_dev = &tx_ring->adapter->ena_dev;
-	num_segments = mbuf->nb_segs;
-	header_len = mbuf->data_len;
-
-	if (likely(num_segments < tx_ring->sgl_size))
-		goto checkspace;
-
-	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
-	    (num_segments == tx_ring->sgl_size) &&
-	    (header_len < tx_ring->tx_max_header_size))
-		goto checkspace;
-
-	/* Checking for space for 2 additional metadata descriptors due to
-	 * possible header split and metadata descriptor. Linearization will
-	 * be needed so we reduce the segments number from num_segments to 1
-	 */
-	if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 3)) {
-		PMD_TX_LOG(DEBUG, "Not enough space in the Tx queue\n");
-		return ENA_COM_NO_MEM;
-	}
-	++tx_ring->tx_stats.linearize;
-	rc = rte_pktmbuf_linearize(mbuf);
-	if (unlikely(rc)) {
-		PMD_TX_LOG(WARNING, "Mbuf linearize failed\n");
-		rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors);
-		++tx_ring->tx_stats.linearize_failed;
-		return rc;
-	}
-
-	return 0;
-
-checkspace:
-	/* Checking for space for 2 additional metadata descriptors due to
-	 * possible header split and metadata descriptor
-	 */
-	if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
-			num_segments + 2)) {
-		PMD_TX_LOG(DEBUG, "Not enough space in the Tx queue\n");
-		return ENA_COM_NO_MEM;
-	}
-
-	return 0;
-}
-
 static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
 	struct ena_tx_buffer *tx_info,
 	struct rte_mbuf *mbuf,
@@ -2688,15 +2650,21 @@ static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf)
 	int nb_hw_desc;
 	int rc;
 
-	rc = ena_check_space_and_linearize_mbuf(tx_ring, mbuf);
-	if (unlikely(rc))
-		return rc;
+	/* Checking for space for 2 additional metadata descriptors due to
+	 * possible header split and metadata descriptor
+	 */
+	if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
+			mbuf->nb_segs + 2)) {
+		PMD_DRV_LOG(DEBUG, "Not enough space in the tx queue\n");
+		return ENA_COM_NO_MEM;
+	}
 
 	next_to_use = tx_ring->next_to_use;
 	req_id = tx_ring->empty_tx_reqs[next_to_use];
 
 	tx_info = &tx_ring->tx_buffer_info[req_id];
 	tx_info->num_of_bufs = 0;
+	RTE_ASSERT(tx_info->mbuf == NULL);
 
 	ena_tx_map_mbuf(tx_ring, tx_info, mbuf, &push_header, &header_len);
 
@@ -2808,6 +2776,10 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	}
 #endif
 
+	available_desc = ena_com_free_q_entries(tx_ring->ena_com_io_sq);
+	if (available_desc < tx_ring->tx_free_thresh)
+		ena_tx_cleanup(tx_ring);
+
 	for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
 		if (ena_xmit_mbuf(tx_ring, tx_pkts[sent_idx]))
 			break;
@@ -2816,9 +2788,6 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			tx_ring->size_mask)]);
 	}
 
-	available_desc = ena_com_free_q_entries(tx_ring->ena_com_io_sq);
-	tx_ring->tx_stats.available_desc = available_desc;
-
 	/* If there are ready packets to be xmitted... */
 	if (likely(tx_ring->pkts_without_db)) {
 		/* ...let HW do its best :-) */
@@ -2827,9 +2796,6 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		tx_ring->pkts_without_db = false;
 	}
 
-	if (available_desc < tx_ring->tx_free_thresh)
-		ena_tx_cleanup(tx_ring);
-
 	tx_ring->tx_stats.available_desc =
 		ena_com_free_q_entries(tx_ring->ena_com_io_sq);
 	tx_ring->tx_stats.tx_poll++;
@@ -3200,6 +3166,38 @@ static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev,
 	return 0;
 }
 
+static int ena_configure_aenq(struct ena_adapter *adapter)
+{
+	uint32_t aenq_groups = adapter->all_aenq_groups;
+	int rc;
+
+	/* All_aenq_groups holds all AENQ functions supported by the device and
+	 * the HW, so at first we need to be sure the LSC request is valid.
+	 */
+	if (adapter->edev_data->dev_conf.intr_conf.lsc != 0) {
+		if (!(aenq_groups & BIT(ENA_ADMIN_LINK_CHANGE))) {
+			PMD_DRV_LOG(ERR,
+				"LSC requested, but it's not supported by the AENQ\n");
+			return -EINVAL;
+		}
+	} else {
+		/* If LSC wasn't enabled by the app, let's enable all supported
+		 * AENQ procedures except the LSC.
+		 */
+		aenq_groups &= ~BIT(ENA_ADMIN_LINK_CHANGE);
+	}
+
+	rc = ena_com_set_aenq_config(&adapter->ena_dev, aenq_groups);
+	if (rc != 0) {
+		PMD_DRV_LOG(ERR, "Cannot configure AENQ groups, rc=%d\n", rc);
+		return rc;
+	}
+
+	adapter->active_aenq_groups = aenq_groups;
+
+	return 0;
+}
+
 /*********************************************************************
  *  PMD configuration
  *********************************************************************/
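
Note (not part of the patch): the three Rx checksum counters added to ena_stats_rx_strings[] above are per-ring statistics, so they surface through the generic ethdev xstats API. Below is a minimal sketch of how an application could read them; the substring matching assumes the exported xstat names keep the l3_csum_bad/l4_csum_bad/l4_csum_good suffixes from the table above, and the helper name is invented for illustration.

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <rte_ethdev.h>

static void print_ena_rx_csum_xstats(uint16_t port_id)
{
	struct rte_eth_xstat_name *names = NULL;
	struct rte_eth_xstat *xstats = NULL;
	int len, i;

	/* A NULL array just queries how many xstats the port exposes. */
	len = rte_eth_xstats_get(port_id, NULL, 0);
	if (len <= 0)
		return;

	names = calloc(len, sizeof(*names));
	xstats = calloc(len, sizeof(*xstats));
	if (names == NULL || xstats == NULL)
		goto out;

	if (rte_eth_xstats_get_names(port_id, names, len) != len ||
	    rte_eth_xstats_get(port_id, xstats, len) != len)
		goto out;

	for (i = 0; i < len; i++) {
		/* Match the counters introduced by this patch. */
		if (strstr(names[i].name, "l3_csum_bad") != NULL ||
		    strstr(names[i].name, "l4_csum_bad") != NULL ||
		    strstr(names[i].name, "l4_csum_good") != NULL)
			printf("%s: %" PRIu64 "\n",
			       names[i].name, xstats[i].value);
	}
out:
	free(names);
	free(xstats);
}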
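Note (not part of the patch): ena_configure_aenq() above validates the application's LSC request against the AENQ groups the device advertises, and eth_ena_dev_init() now clears RTE_ETH_DEV_INTR_LSC when ENA_ADMIN_LINK_CHANGE is unsupported. A sketch of the matching application side follows; the helper and callback names are made up for the example.

#include <stdio.h>
#include <string.h>

#include <rte_common.h>
#include <rte_ethdev.h>

static int link_status_cb(uint16_t port_id, enum rte_eth_event_type type,
			  void *param, void *ret_param)
{
	struct rte_eth_link link;

	RTE_SET_USED(type);
	RTE_SET_USED(param);
	RTE_SET_USED(ret_param);

	rte_eth_link_get_nowait(port_id, &link);
	printf("Port %u link is %s\n", (unsigned int)port_id,
	       link.link_status == RTE_ETH_LINK_UP ? "up" : "down");

	return 0;
}

static int configure_with_lsc(uint16_t port_id)
{
	struct rte_eth_conf port_conf;
	struct rte_eth_dev_info dev_info;
	int rc;

	rc = rte_eth_dev_info_get(port_id, &dev_info);
	if (rc != 0)
		return rc;

	memset(&port_conf, 0, sizeof(port_conf));

	/* With this patch the ENA PMD clears RTE_ETH_DEV_INTR_LSC when the
	 * device's AENQ lacks ENA_ADMIN_LINK_CHANGE; requesting LSC anyway
	 * would make ena_configure_aenq() fail dev_configure with -EINVAL.
	 */
	if (*dev_info.dev_flags & RTE_ETH_DEV_INTR_LSC)
		port_conf.intr_conf.lsc = 1;

	rc = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (rc != 0)
		return rc;

	return rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
					     link_status_cb, NULL);
}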
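Note (not part of the patch): the segment-count check moved into eth_ena_prep_pkts() above is reached through the generic rte_eth_tx_prepare() call, so mbuf chains exceeding the ring's sgl_size are now rejected with rte_errno set to EINVAL before they reach the Tx data path. A minimal sketch, with the wrapper name invented for illustration:

#include <stdio.h>

#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static uint16_t send_burst_checked(uint16_t port_id, uint16_t queue_id,
				   struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_prep;

	/* Runs the PMD's tx_pkt_prepare callback (eth_ena_prep_pkts() for
	 * ENA), which now also rejects mbufs with too many segments.
	 */
	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
	if (nb_prep != nb_pkts)
		printf("packet %u rejected: %s\n", (unsigned int)nb_prep,
		       rte_strerror(rte_errno));

	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}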