X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fena%2Fena_ethdev.c;h=ed3dd162ba58595a587b61926ac6b5705a299fd3;hb=67216c31e43d1f09bc02493465d578292f2d9b7a;hp=25ba6a9e776cc2bcd2cb0be6e406981f0b6965ca;hpb=f93e20e5161a9bb7d117e64b926ac54739ad1fa5;p=dpdk.git diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c index 25ba6a9e77..ed3dd162ba 100644 --- a/drivers/net/ena/ena_ethdev.c +++ b/drivers/net/ena/ena_ethdev.c @@ -21,7 +21,7 @@ #include #define DRV_MODULE_VER_MAJOR 2 -#define DRV_MODULE_VER_MINOR 4 +#define DRV_MODULE_VER_MINOR 5 #define DRV_MODULE_VER_SUBMINOR 0 #define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l) @@ -38,11 +38,6 @@ #define ENA_PTYPE_HAS_HASH (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP) -enum ethtool_stringset { - ETH_SS_TEST = 0, - ETH_SS_STATS, -}; - struct ena_stats { char name[ETH_GSTRING_LEN]; int stat_offset; @@ -93,8 +88,6 @@ static const struct ena_stats ena_stats_tx_strings[] = { ENA_STAT_TX_ENTRY(cnt), ENA_STAT_TX_ENTRY(bytes), ENA_STAT_TX_ENTRY(prepare_ctx_err), - ENA_STAT_TX_ENTRY(linearize), - ENA_STAT_TX_ENTRY(linearize_failed), ENA_STAT_TX_ENTRY(tx_poll), ENA_STAT_TX_ENTRY(doorbells), ENA_STAT_TX_ENTRY(bad_req_id), @@ -106,7 +99,9 @@ static const struct ena_stats ena_stats_rx_strings[] = { ENA_STAT_RX_ENTRY(cnt), ENA_STAT_RX_ENTRY(bytes), ENA_STAT_RX_ENTRY(refill_partial), - ENA_STAT_RX_ENTRY(bad_csum), + ENA_STAT_RX_ENTRY(l3_csum_bad), + ENA_STAT_RX_ENTRY(l4_csum_bad), + ENA_STAT_RX_ENTRY(l4_csum_good), ENA_STAT_RX_ENTRY(mbuf_alloc_fail), ENA_STAT_RX_ENTRY(bad_desc_num), ENA_STAT_RX_ENTRY(bad_req_id), @@ -117,13 +112,13 @@ static const struct ena_stats ena_stats_rx_strings[] = { #define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings) #define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings) -#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\ - DEV_TX_OFFLOAD_UDP_CKSUM |\ - DEV_TX_OFFLOAD_IPV4_CKSUM |\ - DEV_TX_OFFLOAD_TCP_TSO) -#define MBUF_OFFLOADS (PKT_TX_L4_MASK |\ - PKT_TX_IP_CKSUM |\ - PKT_TX_TCP_SEG) +#define QUEUE_OFFLOADS (RTE_ETH_TX_OFFLOAD_TCP_CKSUM |\ + RTE_ETH_TX_OFFLOAD_UDP_CKSUM |\ + RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |\ + RTE_ETH_TX_OFFLOAD_TCP_TSO) +#define MBUF_OFFLOADS (RTE_MBUF_F_TX_L4_MASK |\ + RTE_MBUF_F_TX_IP_CKSUM |\ + RTE_MBUF_F_TX_TCP_SEG) /** Vendor ID used by Amazon devices */ #define PCI_VENDOR_ID_AMAZON 0x1D0F @@ -131,15 +126,14 @@ static const struct ena_stats ena_stats_rx_strings[] = { #define PCI_DEVICE_ID_ENA_VF 0xEC20 #define PCI_DEVICE_ID_ENA_VF_RSERV0 0xEC21 -#define ENA_TX_OFFLOAD_MASK (\ - PKT_TX_L4_MASK | \ - PKT_TX_IPV6 | \ - PKT_TX_IPV4 | \ - PKT_TX_IP_CKSUM | \ - PKT_TX_TCP_SEG) +#define ENA_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_L4_MASK | \ + RTE_MBUF_F_TX_IPV6 | \ + RTE_MBUF_F_TX_IPV4 | \ + RTE_MBUF_F_TX_IP_CKSUM | \ + RTE_MBUF_F_TX_TCP_SEG) #define ENA_TX_OFFLOAD_NOTSUP_MASK \ - (PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK) + (RTE_MBUF_F_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK) /** HW specific offloads capabilities. */ /* IPv4 checksum offload. 
*/ @@ -166,10 +160,9 @@ static const struct rte_pci_id pci_id_ena_map[] = { static struct ena_aenq_handlers aenq_handlers; -static int ena_device_init(struct ena_com_dev *ena_dev, +static int ena_device_init(struct ena_adapter *adapter, struct rte_pci_device *pdev, - struct ena_com_dev_get_features_ctx *get_feat_ctx, - bool *wd_state); + struct ena_com_dev_get_features_ctx *get_feat_ctx); static int ena_dev_configure(struct rte_eth_dev *dev); static void ena_tx_map_mbuf(struct ena_ring *tx_ring, struct ena_tx_buffer *tx_info, @@ -255,6 +248,7 @@ static int ena_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id); static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id); +static int ena_configure_aenq(struct ena_adapter *adapter); static const struct eth_dev_ops ena_dev_ops = { .dev_configure = ena_dev_configure, @@ -281,10 +275,12 @@ static const struct eth_dev_ops ena_dev_ops = { .rss_hash_conf_get = ena_rss_hash_conf_get, }; -static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf, +static inline void ena_rx_mbuf_prepare(struct ena_ring *rx_ring, + struct rte_mbuf *mbuf, struct ena_com_rx_ctx *ena_rx_ctx, bool fill_hash) { + struct ena_stats_rx *rx_stats = &rx_ring->rx_stats; uint64_t ol_flags = 0; uint32_t packet_type = 0; @@ -295,25 +291,31 @@ static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf, if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) { packet_type |= RTE_PTYPE_L3_IPV4; - if (unlikely(ena_rx_ctx->l3_csum_err)) - ol_flags |= PKT_RX_IP_CKSUM_BAD; - else - ol_flags |= PKT_RX_IP_CKSUM_GOOD; + if (unlikely(ena_rx_ctx->l3_csum_err)) { + ++rx_stats->l3_csum_bad; + ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD; + } else { + ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD; + } } else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) { packet_type |= RTE_PTYPE_L3_IPV6; } - if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag) - ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN; - else - if (unlikely(ena_rx_ctx->l4_csum_err)) - ol_flags |= PKT_RX_L4_CKSUM_BAD; - else - ol_flags |= PKT_RX_L4_CKSUM_GOOD; + if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag) { + ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN; + } else { + if (unlikely(ena_rx_ctx->l4_csum_err)) { + ++rx_stats->l4_csum_bad; + ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD; + } else { + ++rx_stats->l4_csum_good; + ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD; + } + } if (fill_hash && likely((packet_type & ENA_PTYPE_HAS_HASH) && !ena_rx_ctx->frag)) { - ol_flags |= PKT_RX_RSS_HASH; + ol_flags |= RTE_MBUF_F_RX_RSS_HASH; mbuf->hash.rss = ena_rx_ctx->hash; } @@ -331,19 +333,19 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf, if ((mbuf->ol_flags & MBUF_OFFLOADS) && (queue_offloads & QUEUE_OFFLOADS)) { /* check if TSO is required */ - if ((mbuf->ol_flags & PKT_TX_TCP_SEG) && - (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) { + if ((mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) && + (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)) { ena_tx_ctx->tso_enable = true; ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf); } /* check if L3 checksum is needed */ - if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) && - (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) + if ((mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) && + (queue_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) ena_tx_ctx->l3_csum_enable = true; - if (mbuf->ol_flags & PKT_TX_IPV6) { + if (mbuf->ol_flags & RTE_MBUF_F_TX_IPV6) { ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; } else { ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; @@ -356,13 +358,13 @@ static inline void 
ena_tx_mbuf_prepare(struct rte_mbuf *mbuf, } /* check if L4 checksum is needed */ - if (((mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) && - (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) { + if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM) && + (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) { ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; ena_tx_ctx->l4_csum_enable = true; - } else if (((mbuf->ol_flags & PKT_TX_L4_MASK) == - PKT_TX_UDP_CKSUM) && - (queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) { + } else if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == + RTE_MBUF_F_TX_UDP_CKSUM) && + (queue_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) { ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; ena_tx_ctx->l4_csum_enable = true; } else { @@ -495,7 +497,7 @@ err: static int ena_close(struct rte_eth_dev *dev) { struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; struct ena_adapter *adapter = dev->data->dev_private; int ret = 0; @@ -568,16 +570,13 @@ static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid) struct ena_ring *ring = dev->data->rx_queues[qid]; /* Free ring resources */ - if (ring->rx_buffer_info) - rte_free(ring->rx_buffer_info); + rte_free(ring->rx_buffer_info); ring->rx_buffer_info = NULL; - if (ring->rx_refill_buffer) - rte_free(ring->rx_refill_buffer); + rte_free(ring->rx_refill_buffer); ring->rx_refill_buffer = NULL; - if (ring->empty_rx_reqs) - rte_free(ring->empty_rx_reqs); + rte_free(ring->empty_rx_reqs); ring->empty_rx_reqs = NULL; ring->configured = 0; @@ -591,14 +590,11 @@ static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid) struct ena_ring *ring = dev->data->tx_queues[qid]; /* Free ring resources */ - if (ring->push_buf_intermediate_buf) - rte_free(ring->push_buf_intermediate_buf); + rte_free(ring->push_buf_intermediate_buf); - if (ring->tx_buffer_info) - rte_free(ring->tx_buffer_info); + rte_free(ring->tx_buffer_info); - if (ring->empty_tx_reqs) - rte_free(ring->empty_tx_reqs); + rte_free(ring->empty_tx_reqs); ring->empty_tx_reqs = NULL; ring->tx_buffer_info = NULL; @@ -643,9 +639,9 @@ static int ena_link_update(struct rte_eth_dev *dev, struct rte_eth_link *link = &dev->data->dev_link; struct ena_adapter *adapter = dev->data->dev_private; - link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN; - link->link_speed = ETH_SPEED_NUM_NONE; - link->link_duplex = ETH_LINK_FULL_DUPLEX; + link->link_status = adapter->link_status ? 
RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN; + link->link_speed = RTE_ETH_SPEED_NUM_NONE; + link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX; return 0; } @@ -923,7 +919,7 @@ static int ena_start(struct rte_eth_dev *dev) if (rc) goto err_start_tx; - if (adapter->edev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) { + if (adapter->edev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) { rc = ena_rss_configure(adapter); if (rc) goto err_rss_init; @@ -955,7 +951,7 @@ static int ena_stop(struct rte_eth_dev *dev) struct ena_adapter *adapter = dev->data->dev_private; struct ena_com_dev *ena_dev = &adapter->ena_dev; struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; int rc; /* Cannot free memory in secondary process */ @@ -977,10 +973,9 @@ static int ena_stop(struct rte_eth_dev *dev) rte_intr_disable(intr_handle); rte_intr_efd_disable(intr_handle); - if (intr_handle->intr_vec != NULL) { - rte_free(intr_handle->intr_vec); - intr_handle->intr_vec = NULL; - } + + /* Cleanup vector list */ + rte_intr_vec_list_free(intr_handle); rte_intr_enable(intr_handle); @@ -996,7 +991,7 @@ static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring) struct ena_adapter *adapter = ring->adapter; struct ena_com_dev *ena_dev = &adapter->ena_dev; struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; struct ena_com_create_io_ctx ctx = /* policy set to _HOST just to satisfy icc compiler */ { ENA_ADMIN_PLACEMENT_POLICY_HOST, @@ -1016,7 +1011,10 @@ static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring) ena_qid = ENA_IO_RXQ_IDX(ring->id); ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; if (rte_intr_dp_is_en(intr_handle)) - ctx.msix_vector = intr_handle->intr_vec[ring->id]; + ctx.msix_vector = + rte_intr_vec_list_index_get(intr_handle, + ring->id); + for (i = 0; i < ring->ring_size; i++) ring->empty_rx_reqs[i] = i; } @@ -1407,7 +1405,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) ++rxq->rx_stats.refill_partial; } - /* When we submitted free recources to device... */ + /* When we submitted free resources to device... */ if (likely(i > 0)) { /* ...let HW know that it can fill buffers with data. 
*/ ena_com_write_sq_doorbell(rxq->ena_com_io_sq); @@ -1418,11 +1416,11 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) return i; } -static int ena_device_init(struct ena_com_dev *ena_dev, +static int ena_device_init(struct ena_adapter *adapter, struct rte_pci_device *pdev, - struct ena_com_dev_get_features_ctx *get_feat_ctx, - bool *wd_state) + struct ena_com_dev_get_features_ctx *get_feat_ctx) { + struct ena_com_dev *ena_dev = &adapter->ena_dev; uint32_t aenq_groups; int rc; bool readless_supported; @@ -1487,13 +1485,8 @@ static int ena_device_init(struct ena_com_dev *ena_dev, BIT(ENA_ADMIN_WARNING); aenq_groups &= get_feat_ctx->aenq.supported_groups; - rc = ena_com_set_aenq_config(ena_dev, aenq_groups); - if (rc) { - PMD_DRV_LOG(ERR, "Cannot configure AENQ groups, rc: %d\n", rc); - goto err_admin_init; - } - *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); + adapter->all_aenq_groups = aenq_groups; return 0; @@ -1519,7 +1512,7 @@ static void ena_interrupt_handler_rte(void *cb_arg) static void check_for_missing_keep_alive(struct ena_adapter *adapter) { - if (!adapter->wd_state) + if (!(adapter->active_aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE))) return; if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) @@ -1631,6 +1624,9 @@ static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer, struct rte_eth_dev *dev = arg; struct ena_adapter *adapter = dev->data->dev_private; + if (unlikely(adapter->trigger_reset)) + return; + check_for_missing_keep_alive(adapter); check_for_admin_com_state(adapter); check_for_tx_completions(adapter); @@ -1800,7 +1796,6 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev) int rc; static int adapters_found; bool disable_meta_caching; - bool wd_state = false; eth_dev->dev_ops = &ena_dev_ops; eth_dev->rx_pkt_burst = ð_ena_recv_pkts; @@ -1825,7 +1820,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev) pci_dev->addr.devid, pci_dev->addr.function); - intr_handle = &pci_dev->intr_handle; + intr_handle = pci_dev->intr_handle; adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr; adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr; @@ -1852,12 +1847,15 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev) } /* device specific initialization routine */ - rc = ena_device_init(ena_dev, pci_dev, &get_feat_ctx, &wd_state); + rc = ena_device_init(adapter, pci_dev, &get_feat_ctx); if (rc) { PMD_INIT_LOG(CRIT, "Failed to init ENA device\n"); goto err; } - adapter->wd_state = wd_state; + + /* Check if device supports LSC */ + if (!(adapter->all_aenq_groups & BIT(ENA_ADMIN_LINK_CHANGE))) + adapter->edev_data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC; set_default_llq_configurations(&llq_config, &get_feat_ctx.llq, adapter->use_large_llq_hdr); @@ -2001,12 +1999,13 @@ static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev) static int ena_dev_configure(struct rte_eth_dev *dev) { struct ena_adapter *adapter = dev->data->dev_private; + int rc; adapter->state = ENA_ADAPTER_STATE_CONFIG; - if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) - dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; - dev->data->dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS; + if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) + dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; + dev->data->dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS; /* Scattered Rx cannot be turned off in the HW, so this capability must * be forced. 
@@ -2027,10 +2026,9 @@ static int ena_dev_configure(struct rte_eth_dev *dev) */ adapter->tx_cleanup_stall_delay = adapter->missing_tx_completion_to / 2; - adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads; - adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads; + rc = ena_configure_aenq(adapter); - return 0; + return rc; } static void ena_init_rings(struct ena_adapter *adapter, @@ -2067,17 +2065,17 @@ static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter) uint64_t port_offloads = 0; if (adapter->offloads.rx_offloads & ENA_L3_IPV4_CSUM) - port_offloads |= DEV_RX_OFFLOAD_IPV4_CKSUM; + port_offloads |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM; if (adapter->offloads.rx_offloads & (ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM)) port_offloads |= - DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM; + RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM; if (adapter->offloads.rx_offloads & ENA_RX_RSS_HASH) - port_offloads |= DEV_RX_OFFLOAD_RSS_HASH; + port_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; - port_offloads |= DEV_RX_OFFLOAD_SCATTER; + port_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER; return port_offloads; } @@ -2087,17 +2085,17 @@ static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter) uint64_t port_offloads = 0; if (adapter->offloads.tx_offloads & ENA_IPV4_TSO) - port_offloads |= DEV_TX_OFFLOAD_TCP_TSO; + port_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO; if (adapter->offloads.tx_offloads & ENA_L3_IPV4_CSUM) - port_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM; + port_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM; if (adapter->offloads.tx_offloads & (ENA_L4_IPV4_CSUM_PARTIAL | ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM | ENA_L4_IPV6_CSUM_PARTIAL)) port_offloads |= - DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM; + RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM; - port_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS; + port_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS; return port_offloads; } @@ -2130,14 +2128,14 @@ static int ena_infos_get(struct rte_eth_dev *dev, ena_assert_msg(ena_dev != NULL, "Uninitialized device\n"); dev_info->speed_capa = - ETH_LINK_SPEED_1G | - ETH_LINK_SPEED_2_5G | - ETH_LINK_SPEED_5G | - ETH_LINK_SPEED_10G | - ETH_LINK_SPEED_25G | - ETH_LINK_SPEED_40G | - ETH_LINK_SPEED_50G | - ETH_LINK_SPEED_100G; + RTE_ETH_LINK_SPEED_1G | + RTE_ETH_LINK_SPEED_2_5G | + RTE_ETH_LINK_SPEED_5G | + RTE_ETH_LINK_SPEED_10G | + RTE_ETH_LINK_SPEED_25G | + RTE_ETH_LINK_SPEED_40G | + RTE_ETH_LINK_SPEED_50G | + RTE_ETH_LINK_SPEED_100G; /* Inform framework about available features */ dev_info->rx_offload_capa = ena_get_rx_port_offloads(adapter); @@ -2303,7 +2301,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, } #endif - fill_hash = rx_ring->offloads & DEV_RX_OFFLOAD_RSS_HASH; + fill_hash = rx_ring->offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH; descs_in_use = rx_ring->ring_size - ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1; @@ -2351,13 +2349,11 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, } /* fill mbuf attributes if any */ - ena_rx_mbuf_prepare(mbuf, &ena_rx_ctx, fill_hash); + ena_rx_mbuf_prepare(rx_ring, mbuf, &ena_rx_ctx, fill_hash); if (unlikely(mbuf->ol_flags & - (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))) { + (RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD))) rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors); - ++rx_ring->rx_stats.bad_csum; - } rx_pkts[completed] = mbuf; rx_ring->rx_stats.bytes += mbuf->pkt_len; @@ -2402,10 +2398,10 @@ eth_ena_prep_pkts(void *tx_queue, struct 
rte_mbuf **tx_pkts, if (ol_flags == 0) continue; - l4_csum_flag = ol_flags & PKT_TX_L4_MASK; + l4_csum_flag = ol_flags & RTE_MBUF_F_TX_L4_MASK; /* SCTP checksum offload is not supported by the ENA. */ if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) || - l4_csum_flag == PKT_TX_SCTP_CKSUM) { + l4_csum_flag == RTE_MBUF_F_TX_SCTP_CKSUM) { PMD_TX_LOG(DEBUG, "mbuf[%" PRIu32 "] has unsupported offloads flags set: 0x%" PRIu64 "\n", i, ol_flags); @@ -2413,14 +2409,25 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, return i; } + if (unlikely(m->nb_segs >= tx_ring->sgl_size && + !(tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && + m->nb_segs == tx_ring->sgl_size && + m->data_len < tx_ring->tx_max_header_size))) { + PMD_TX_LOG(DEBUG, + "mbuf[%" PRIu32 "] has too many segments: %" PRIu16 "\n", + i, m->nb_segs); + rte_errno = EINVAL; + return i; + } + #ifdef RTE_LIBRTE_ETHDEV_DEBUG /* Check if requested offload is also enabled for the queue */ - if ((ol_flags & PKT_TX_IP_CKSUM && - !(tx_ring->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) || - (l4_csum_flag == PKT_TX_TCP_CKSUM && - !(tx_ring->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) || - (l4_csum_flag == PKT_TX_UDP_CKSUM && - !(tx_ring->offloads & DEV_TX_OFFLOAD_UDP_CKSUM))) { + if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM && + !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) || + (l4_csum_flag == RTE_MBUF_F_TX_TCP_CKSUM && + !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) || + (l4_csum_flag == RTE_MBUF_F_TX_UDP_CKSUM && + !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM))) { PMD_TX_LOG(DEBUG, "mbuf[%" PRIu32 "]: requested offloads: %" PRIu16 " are not enabled for the queue[%u]\n", i, m->nb_segs, tx_ring->id); @@ -2431,7 +2438,7 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, /* The caller is obligated to set l2 and l3 len if any cksum * offload is enabled. */ - if (unlikely(ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK) && + if (unlikely(ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK) && (m->l2_len == 0 || m->l3_len == 0))) { PMD_TX_LOG(DEBUG, "mbuf[%" PRIu32 "]: l2_len or l3_len values are 0 while the offload was requested\n", @@ -2450,14 +2457,14 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, * pseudo header checksum is needed. */ need_pseudo_csum = false; - if (ol_flags & PKT_TX_IPV4) { - if (ol_flags & PKT_TX_IP_CKSUM && + if (ol_flags & RTE_MBUF_F_TX_IPV4) { + if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM && !(dev_offload_capa & ENA_L3_IPV4_CSUM)) { rte_errno = ENOTSUP; return i; } - if (ol_flags & PKT_TX_TCP_SEG && + if (ol_flags & RTE_MBUF_F_TX_TCP_SEG && !(dev_offload_capa & ENA_IPV4_TSO)) { rte_errno = ENOTSUP; return i; @@ -2466,7 +2473,7 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, /* Check HW capabilities and if pseudo csum is needed * for L4 offloads. */ - if (l4_csum_flag != PKT_TX_L4_NO_CKSUM && + if (l4_csum_flag != RTE_MBUF_F_TX_L4_NO_CKSUM && !(dev_offload_capa & ENA_L4_IPV4_CSUM)) { if (dev_offload_capa & ENA_L4_IPV4_CSUM_PARTIAL) { @@ -2483,22 +2490,22 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset); if (frag_field & RTE_IPV4_HDR_DF_FLAG) { m->packet_type |= RTE_PTYPE_L4_NONFRAG; - } else if (ol_flags & PKT_TX_TCP_SEG) { + } else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) { /* In case we are supposed to TSO and have DF * not set (DF=0) hardware must be provided with * partial checksum. 
*/ need_pseudo_csum = true; } - } else if (ol_flags & PKT_TX_IPV6) { + } else if (ol_flags & RTE_MBUF_F_TX_IPV6) { /* There is no support for IPv6 TSO as for now. */ - if (ol_flags & PKT_TX_TCP_SEG) { + if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) { rte_errno = ENOTSUP; return i; } /* Check HW capabilities and if pseudo csum is needed */ - if (l4_csum_flag != PKT_TX_L4_NO_CKSUM && + if (l4_csum_flag != RTE_MBUF_F_TX_L4_NO_CKSUM && !(dev_offload_capa & ENA_L4_IPV6_CSUM)) { if (dev_offload_capa & ENA_L4_IPV6_CSUM_PARTIAL) { @@ -2559,56 +2566,6 @@ static void ena_update_hints(struct ena_adapter *adapter, } } -static int ena_check_space_and_linearize_mbuf(struct ena_ring *tx_ring, - struct rte_mbuf *mbuf) -{ - struct ena_com_dev *ena_dev; - int num_segments, header_len, rc; - - ena_dev = &tx_ring->adapter->ena_dev; - num_segments = mbuf->nb_segs; - header_len = mbuf->data_len; - - if (likely(num_segments < tx_ring->sgl_size)) - goto checkspace; - - if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && - (num_segments == tx_ring->sgl_size) && - (header_len < tx_ring->tx_max_header_size)) - goto checkspace; - - /* Checking for space for 2 additional metadata descriptors due to - * possible header split and metadata descriptor. Linearization will - * be needed so we reduce the segments number from num_segments to 1 - */ - if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 3)) { - PMD_TX_LOG(DEBUG, "Not enough space in the Tx queue\n"); - return ENA_COM_NO_MEM; - } - ++tx_ring->tx_stats.linearize; - rc = rte_pktmbuf_linearize(mbuf); - if (unlikely(rc)) { - PMD_TX_LOG(WARNING, "Mbuf linearize failed\n"); - rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors); - ++tx_ring->tx_stats.linearize_failed; - return rc; - } - - return 0; - -checkspace: - /* Checking for space for 2 additional metadata descriptors due to - * possible header split and metadata descriptor - */ - if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, - num_segments + 2)) { - PMD_TX_LOG(DEBUG, "Not enough space in the Tx queue\n"); - return ENA_COM_NO_MEM; - } - - return 0; -} - static void ena_tx_map_mbuf(struct ena_ring *tx_ring, struct ena_tx_buffer *tx_info, struct rte_mbuf *mbuf, @@ -2693,15 +2650,21 @@ static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf) int nb_hw_desc; int rc; - rc = ena_check_space_and_linearize_mbuf(tx_ring, mbuf); - if (unlikely(rc)) - return rc; + /* Checking for space for 2 additional metadata descriptors due to + * possible header split and metadata descriptor + */ + if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, + mbuf->nb_segs + 2)) { + PMD_DRV_LOG(DEBUG, "Not enough space in the tx queue\n"); + return ENA_COM_NO_MEM; + } next_to_use = tx_ring->next_to_use; req_id = tx_ring->empty_tx_reqs[next_to_use]; tx_info = &tx_ring->tx_buffer_info[req_id]; tx_info->num_of_bufs = 0; + RTE_ASSERT(tx_info->mbuf == NULL); ena_tx_map_mbuf(tx_ring, tx_info, mbuf, &push_header, &header_len); @@ -2813,6 +2776,10 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, } #endif + available_desc = ena_com_free_q_entries(tx_ring->ena_com_io_sq); + if (available_desc < tx_ring->tx_free_thresh) + ena_tx_cleanup(tx_ring); + for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) { if (ena_xmit_mbuf(tx_ring, tx_pkts[sent_idx])) break; @@ -2821,9 +2788,6 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, tx_ring->size_mask)]); } - available_desc = ena_com_free_q_entries(tx_ring->ena_com_io_sq); - 
tx_ring->tx_stats.available_desc = available_desc; - /* If there are ready packets to be xmitted... */ if (likely(tx_ring->pkts_without_db)) { /* ...let HW do its best :-) */ @@ -2832,9 +2796,6 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, tx_ring->pkts_without_db = false; } - if (available_desc < tx_ring->tx_free_thresh) - ena_tx_cleanup(tx_ring); - tx_ring->tx_stats.available_desc = ena_com_free_q_entries(tx_ring->ena_com_io_sq); tx_ring->tx_stats.tx_poll++; @@ -3113,7 +3074,7 @@ static int ena_parse_devargs(struct ena_adapter *adapter, static int ena_setup_rx_intr(struct rte_eth_dev *dev) { struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; int rc; uint16_t vectors_nb, i; bool rx_intr_requested = dev->data->dev_conf.intr_conf.rxq; @@ -3140,9 +3101,9 @@ static int ena_setup_rx_intr(struct rte_eth_dev *dev) goto enable_intr; } - intr_handle->intr_vec = rte_zmalloc("intr_vec", - dev->data->nb_rx_queues * sizeof(*intr_handle->intr_vec), 0); - if (intr_handle->intr_vec == NULL) { + /* Allocate the vector list */ + if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", + dev->data->nb_rx_queues)) { PMD_DRV_LOG(ERR, "Failed to allocate interrupt vector for %d queues\n", dev->data->nb_rx_queues); @@ -3161,7 +3122,9 @@ static int ena_setup_rx_intr(struct rte_eth_dev *dev) } for (i = 0; i < vectors_nb; ++i) - intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i; + if (rte_intr_vec_list_index_set(intr_handle, i, + RTE_INTR_VEC_RXTX_OFFSET + i)) + goto disable_intr_efd; rte_intr_enable(intr_handle); return 0; @@ -3169,8 +3132,7 @@ static int ena_setup_rx_intr(struct rte_eth_dev *dev) disable_intr_efd: rte_intr_efd_disable(intr_handle); free_intr_vec: - rte_free(intr_handle->intr_vec); - intr_handle->intr_vec = NULL; + rte_intr_vec_list_free(intr_handle); enable_intr: rte_intr_enable(intr_handle); return rc; @@ -3204,6 +3166,38 @@ static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev, return 0; } +static int ena_configure_aenq(struct ena_adapter *adapter) +{ + uint32_t aenq_groups = adapter->all_aenq_groups; + int rc; + + /* All_aenq_groups holds all AENQ functions supported by the device and + * the HW, so at first we need to be sure the LSC request is valid. + */ + if (adapter->edev_data->dev_conf.intr_conf.lsc != 0) { + if (!(aenq_groups & BIT(ENA_ADMIN_LINK_CHANGE))) { + PMD_DRV_LOG(ERR, + "LSC requested, but it's not supported by the AENQ\n"); + return -EINVAL; + } + } else { + /* If LSC wasn't enabled by the app, let's enable all supported + * AENQ procedures except the LSC. + */ + aenq_groups &= ~BIT(ENA_ADMIN_LINK_CHANGE); + } + + rc = ena_com_set_aenq_config(&adapter->ena_dev, aenq_groups); + if (rc != 0) { + PMD_DRV_LOG(ERR, "Cannot configure AENQ groups, rc=%d\n", rc); + return rc; + } + + adapter->active_aenq_groups = aenq_groups; + + return 0; +} + /********************************************************************* * PMD configuration *********************************************************************/
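
For context only, not part of the patch: a minimal sketch of how an application could opt in to the link-status-change (LSC) interrupt that the new ena_configure_aenq() validates at configure time. The names configure_with_lsc, lsc_event_cb, and port_id are illustrative, and the sketch assumes the EAL is already initialized and port_id refers to an ENA port; with this patch, if the device does not advertise the LINK_CHANGE AENQ group, the configure step is expected to fail with -EINVAL.

/* Hypothetical application-side sketch (not driver code). */
#include <stdio.h>
#include <rte_ethdev.h>

static int
lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
	     void *cb_arg, void *ret_param)
{
	struct rte_eth_link link;

	(void)event;
	(void)cb_arg;
	(void)ret_param;

	/* Query the link without blocking; the AENQ link-change handler in the
	 * PMD has already updated the adapter's link status at this point.
	 */
	rte_eth_link_get_nowait(port_id, &link);
	printf("Port %u link is %s\n", port_id,
	       link.link_status == RTE_ETH_LINK_UP ? "up" : "down");

	return 0;
}

static int
configure_with_lsc(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf = {
		.intr_conf = {
			/* Request LSC interrupts; ena_configure_aenq() rejects
			 * this if the LINK_CHANGE AENQ group is unsupported.
			 */
			.lsc = 1,
		},
	};
	int ret;

	ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
	if (ret != 0)
		return ret;

	/* Get notified whenever the PMD reports a link state change. */
	return rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
					     lsc_event_cb, NULL);
}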