X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fena%2Fena_ethdev.c;h=deaee30664227fea6ea583656909ed36abfb1373;hb=96ffa8a70f31c5988075b9dfc16ad5faed399257;hp=25ba6a9e776cc2bcd2cb0be6e406981f0b6965ca;hpb=f93e20e5161a9bb7d117e64b926ac54739ad1fa5;p=dpdk.git

diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 25ba6a9e77..deaee30664 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -21,7 +21,7 @@
 #include
 
 #define DRV_MODULE_VER_MAJOR	2
-#define DRV_MODULE_VER_MINOR	4
+#define DRV_MODULE_VER_MINOR	5
 #define DRV_MODULE_VER_SUBMINOR	0
 
 #define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l)
@@ -93,8 +93,6 @@ static const struct ena_stats ena_stats_tx_strings[] = {
 	ENA_STAT_TX_ENTRY(cnt),
 	ENA_STAT_TX_ENTRY(bytes),
 	ENA_STAT_TX_ENTRY(prepare_ctx_err),
-	ENA_STAT_TX_ENTRY(linearize),
-	ENA_STAT_TX_ENTRY(linearize_failed),
 	ENA_STAT_TX_ENTRY(tx_poll),
 	ENA_STAT_TX_ENTRY(doorbells),
 	ENA_STAT_TX_ENTRY(bad_req_id),
@@ -117,13 +115,13 @@ static const struct ena_stats ena_stats_rx_strings[] = {
 #define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
 #define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)
 
-#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
-	DEV_TX_OFFLOAD_UDP_CKSUM |\
-	DEV_TX_OFFLOAD_IPV4_CKSUM |\
-	DEV_TX_OFFLOAD_TCP_TSO)
-#define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
-	PKT_TX_IP_CKSUM |\
-	PKT_TX_TCP_SEG)
+#define QUEUE_OFFLOADS (RTE_ETH_TX_OFFLOAD_TCP_CKSUM |\
+	RTE_ETH_TX_OFFLOAD_UDP_CKSUM |\
+	RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |\
+	RTE_ETH_TX_OFFLOAD_TCP_TSO)
+#define MBUF_OFFLOADS (RTE_MBUF_F_TX_L4_MASK |\
+	RTE_MBUF_F_TX_IP_CKSUM |\
+	RTE_MBUF_F_TX_TCP_SEG)
 
 /** Vendor ID used by Amazon devices */
 #define PCI_VENDOR_ID_AMAZON	0x1D0F
@@ -131,15 +129,14 @@ static const struct ena_stats ena_stats_rx_strings[] = {
 #define PCI_DEVICE_ID_ENA_VF		0xEC20
 #define PCI_DEVICE_ID_ENA_VF_RSERV0	0xEC21
 
-#define	ENA_TX_OFFLOAD_MASK	(\
-	PKT_TX_L4_MASK |	\
-	PKT_TX_IPV6 |		\
-	PKT_TX_IPV4 |		\
-	PKT_TX_IP_CKSUM |	\
-	PKT_TX_TCP_SEG)
+#define	ENA_TX_OFFLOAD_MASK	(RTE_MBUF_F_TX_L4_MASK |	\
+	RTE_MBUF_F_TX_IPV6 |		\
+	RTE_MBUF_F_TX_IPV4 |		\
+	RTE_MBUF_F_TX_IP_CKSUM |	\
+	RTE_MBUF_F_TX_TCP_SEG)
 
 #define	ENA_TX_OFFLOAD_NOTSUP_MASK	\
-	(PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)
+	(RTE_MBUF_F_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)
 
 /** HW specific offloads capabilities. */
 /* IPv4 checksum offload. */
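Aside (not part of the patch): under the DPDK 21.11 rename, the old
PKT_TX_*/DEV_TX_OFFLOAD_* names above become RTE_MBUF_F_TX_*/
RTE_ETH_TX_OFFLOAD_*. A minimal sketch, assuming the ENA_TX_OFFLOAD_*
macros defined above, of screening an mbuf against the supported mask:

#include <stdbool.h>
#include <rte_mbuf.h>

/* True when every Tx offload flag requested on the mbuf falls inside
 * ENA_TX_OFFLOAD_MASK; ENA_TX_OFFLOAD_NOTSUP_MASK is its complement
 * within RTE_MBUF_F_TX_OFFLOAD_MASK. */
static inline bool
ena_mbuf_tx_offloads_ok(const struct rte_mbuf *m)
{
	return (m->ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) == 0;
}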
@@ -296,24 +293,24 @@ static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
 	if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) {
 		packet_type |= RTE_PTYPE_L3_IPV4;
 		if (unlikely(ena_rx_ctx->l3_csum_err))
-			ol_flags |= PKT_RX_IP_CKSUM_BAD;
+			ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
 		else
-			ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+			ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
 	} else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) {
 		packet_type |= RTE_PTYPE_L3_IPV6;
 	}
 
 	if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag)
-		ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+		ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
 	else if (unlikely(ena_rx_ctx->l4_csum_err))
-		ol_flags |= PKT_RX_L4_CKSUM_BAD;
+		ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
 	else
-		ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+		ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
 
 	if (fill_hash &&
 	    likely((packet_type & ENA_PTYPE_HAS_HASH) && !ena_rx_ctx->frag)) {
-		ol_flags |= PKT_RX_RSS_HASH;
+		ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
 		mbuf->hash.rss = ena_rx_ctx->hash;
 	}
 
@@ -331,19 +328,19 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
 	if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
 	    (queue_offloads & QUEUE_OFFLOADS)) {
 		/* check if TSO is required */
-		if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
-		    (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
+		if ((mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
+		    (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)) {
 			ena_tx_ctx->tso_enable = true;
 			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
 		}
 
 		/* check if L3 checksum is needed */
-		if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
-		    (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
+		if ((mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
+		    (queue_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM))
 			ena_tx_ctx->l3_csum_enable = true;
 
-		if (mbuf->ol_flags & PKT_TX_IPV6) {
+		if (mbuf->ol_flags & RTE_MBUF_F_TX_IPV6) {
 			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
 		} else {
 			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
@@ -356,13 +353,13 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
 		}
 
 		/* check if L4 checksum is needed */
-		if (((mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) &&
-		    (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
+		if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM) &&
+		    (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) {
 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
 			ena_tx_ctx->l4_csum_enable = true;
-		} else if (((mbuf->ol_flags & PKT_TX_L4_MASK) ==
-				PKT_TX_UDP_CKSUM) &&
-				(queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+		} else if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
+				RTE_MBUF_F_TX_UDP_CKSUM) &&
+				(queue_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
 			ena_tx_ctx->l4_csum_enable = true;
 		} else {
@@ -495,7 +492,7 @@ err:
 static int ena_close(struct rte_eth_dev *dev)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
 	struct ena_adapter *adapter = dev->data->dev_private;
 	int ret = 0;
 
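Aside (assumed application-side code, not part of the patch): the renamed
Rx flags set by ena_rx_mbuf_prepare() are consumed the same way as the old
ones, e.g. to drop packets whose hardware-verified checksums came back bad:

#include <stdbool.h>
#include <rte_mbuf.h>

/* RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN (fragments, unchecked L4) sets no BAD
 * bit, so such packets pass through for software verification. */
static inline bool
rx_csum_ok(const struct rte_mbuf *m)
{
	return (m->ol_flags &
		(RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD)) == 0;
}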
@@ -568,16 +565,13 @@ static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 	struct ena_ring *ring = dev->data->rx_queues[qid];
 
 	/* Free ring resources */
-	if (ring->rx_buffer_info)
-		rte_free(ring->rx_buffer_info);
+	rte_free(ring->rx_buffer_info);
 	ring->rx_buffer_info = NULL;
 
-	if (ring->rx_refill_buffer)
-		rte_free(ring->rx_refill_buffer);
+	rte_free(ring->rx_refill_buffer);
 	ring->rx_refill_buffer = NULL;
 
-	if (ring->empty_rx_reqs)
-		rte_free(ring->empty_rx_reqs);
+	rte_free(ring->empty_rx_reqs);
 	ring->empty_rx_reqs = NULL;
 
 	ring->configured = 0;
@@ -591,14 +585,11 @@ static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 	struct ena_ring *ring = dev->data->tx_queues[qid];
 
 	/* Free ring resources */
-	if (ring->push_buf_intermediate_buf)
-		rte_free(ring->push_buf_intermediate_buf);
+	rte_free(ring->push_buf_intermediate_buf);
 
-	if (ring->tx_buffer_info)
-		rte_free(ring->tx_buffer_info);
+	rte_free(ring->tx_buffer_info);
 
-	if (ring->empty_tx_reqs)
-		rte_free(ring->empty_tx_reqs);
+	rte_free(ring->empty_tx_reqs);
 
 	ring->empty_tx_reqs = NULL;
 	ring->tx_buffer_info = NULL;
@@ -643,9 +634,9 @@ static int ena_link_update(struct rte_eth_dev *dev,
 	struct rte_eth_link *link = &dev->data->dev_link;
 	struct ena_adapter *adapter = dev->data->dev_private;
 
-	link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
-	link->link_speed = ETH_SPEED_NUM_NONE;
-	link->link_duplex = ETH_LINK_FULL_DUPLEX;
+	link->link_status = adapter->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
+	link->link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	return 0;
 }
@@ -923,7 +914,7 @@ static int ena_start(struct rte_eth_dev *dev)
 	if (rc)
 		goto err_start_tx;
 
-	if (adapter->edev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (adapter->edev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		rc = ena_rss_configure(adapter);
 		if (rc)
 			goto err_rss_init;
@@ -955,7 +946,7 @@ static int ena_stop(struct rte_eth_dev *dev)
 	struct ena_adapter *adapter = dev->data->dev_private;
 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
 	int rc;
 
 	/* Cannot free memory in secondary process */
@@ -977,10 +968,9 @@ static int ena_stop(struct rte_eth_dev *dev)
 
 	rte_intr_disable(intr_handle);
 	rte_intr_efd_disable(intr_handle);
-	if (intr_handle->intr_vec != NULL) {
-		rte_free(intr_handle->intr_vec);
-		intr_handle->intr_vec = NULL;
-	}
+
+	/* Cleanup vector list */
+	rte_intr_vec_list_free(intr_handle);
 
 	rte_intr_enable(intr_handle);
 
@@ -996,7 +986,7 @@ static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring)
 	struct ena_adapter *adapter = ring->adapter;
 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
 	struct ena_com_create_io_ctx ctx =
 		/* policy set to _HOST just to satisfy icc compiler */
 		{ ENA_ADMIN_PLACEMENT_POLICY_HOST,
@@ -1016,7 +1006,10 @@ static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring)
 		ena_qid = ENA_IO_RXQ_IDX(ring->id);
 		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
 		if (rte_intr_dp_is_en(intr_handle))
-			ctx.msix_vector = intr_handle->intr_vec[ring->id];
+			ctx.msix_vector =
+				rte_intr_vec_list_index_get(intr_handle,
+							    ring->id);
+
 		for (i = 0; i < ring->ring_size; i++)
 			ring->empty_rx_reqs[i] = i;
 	}
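Context for the intr_handle churn above: in DPDK 21.11 struct
rte_intr_handle became opaque, so direct intr_vec[] accesses give way to
the accessor API used in this diff. A self-contained sketch of the
alloc/set/get pattern (vec_list_demo is a made-up name):

#include <errno.h>
#include <rte_interrupts.h>

static int
vec_list_demo(struct rte_intr_handle *intr_handle, int nb_rx_queues)
{
	int i;

	/* Replaces the rte_zmalloc() of intr_handle->intr_vec[]. */
	if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", nb_rx_queues))
		return -ENOMEM;

	/* Replaces the intr_vec[i] = ... assignments. */
	for (i = 0; i < nb_rx_queues; i++)
		if (rte_intr_vec_list_index_set(intr_handle, i,
				RTE_INTR_VEC_RXTX_OFFSET + i))
			return -EINVAL;

	/* Replaces reads of intr_vec[i], as in ena_create_io_queue(). */
	return rte_intr_vec_list_index_get(intr_handle, 0);
}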
@@ -1407,7 +1400,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 		++rxq->rx_stats.refill_partial;
 	}
 
-	/* When we submitted free recources to device... */
+	/* When we submitted free resources to device... */
 	if (likely(i > 0)) {
 		/* ...let HW know that it can fill buffers with data. */
 		ena_com_write_sq_doorbell(rxq->ena_com_io_sq);
@@ -1825,7 +1818,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 		     pci_dev->addr.devid,
 		     pci_dev->addr.function);
 
-	intr_handle = &pci_dev->intr_handle;
+	intr_handle = pci_dev->intr_handle;
 
 	adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr;
 	adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr;
@@ -2004,9 +1997,9 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 
 	adapter->state = ENA_ADAPTER_STATE_CONFIG;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
-	dev->data->dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+	dev->data->dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	/* Scattered Rx cannot be turned off in the HW, so this capability must
 	 * be forced.
@@ -2067,17 +2060,17 @@ static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter)
 	uint64_t port_offloads = 0;
 
 	if (adapter->offloads.rx_offloads & ENA_L3_IPV4_CSUM)
-		port_offloads |= DEV_RX_OFFLOAD_IPV4_CKSUM;
+		port_offloads |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
 
 	if (adapter->offloads.rx_offloads &
 	    (ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM))
 		port_offloads |=
-			DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM;
+			RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 
 	if (adapter->offloads.rx_offloads & ENA_RX_RSS_HASH)
-		port_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+		port_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
-	port_offloads |= DEV_RX_OFFLOAD_SCATTER;
+	port_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 
 	return port_offloads;
 }
@@ -2087,17 +2080,17 @@ static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter)
 	uint64_t port_offloads = 0;
 
 	if (adapter->offloads.tx_offloads & ENA_IPV4_TSO)
-		port_offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+		port_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	if (adapter->offloads.tx_offloads & ENA_L3_IPV4_CSUM)
-		port_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+		port_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
 
 	if (adapter->offloads.tx_offloads &
 	    (ENA_L4_IPV4_CSUM_PARTIAL | ENA_L4_IPV4_CSUM |
 	     ENA_L4_IPV6_CSUM | ENA_L4_IPV6_CSUM_PARTIAL))
 		port_offloads |=
-			DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM;
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
-	port_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+	port_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return port_offloads;
 }
@@ -2130,14 +2123,14 @@ static int ena_infos_get(struct rte_eth_dev *dev,
 	ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
 
 	dev_info->speed_capa =
-			ETH_LINK_SPEED_1G   |
-			ETH_LINK_SPEED_2_5G |
-			ETH_LINK_SPEED_5G   |
-			ETH_LINK_SPEED_10G  |
-			ETH_LINK_SPEED_25G  |
-			ETH_LINK_SPEED_40G  |
-			ETH_LINK_SPEED_50G  |
-			ETH_LINK_SPEED_100G;
+			RTE_ETH_LINK_SPEED_1G   |
+			RTE_ETH_LINK_SPEED_2_5G |
+			RTE_ETH_LINK_SPEED_5G   |
+			RTE_ETH_LINK_SPEED_10G  |
+			RTE_ETH_LINK_SPEED_25G  |
+			RTE_ETH_LINK_SPEED_40G  |
+			RTE_ETH_LINK_SPEED_50G  |
+			RTE_ETH_LINK_SPEED_100G;
 
 	/* Inform framework about available features */
 	dev_info->rx_offload_capa = ena_get_rx_port_offloads(adapter);
@@ -2303,7 +2296,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	}
 #endif
 
-	fill_hash = rx_ring->offloads & DEV_RX_OFFLOAD_RSS_HASH;
+	fill_hash = rx_ring->offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	descs_in_use = rx_ring->ring_size -
 		ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1;
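Aside (assumed application-side code, not part of the patch): the renamed
capability bits advertised by ena_infos_get() surface through
rte_eth_dev_info_get(), e.g.:

#include <rte_ethdev.h>

/* Request RSS hash delivery only when the port advertises it. */
static void
enable_rss_hash_if_supported(uint16_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return;

	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_RSS_HASH)
		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
}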
@@ -2354,7 +2347,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		ena_rx_mbuf_prepare(mbuf, &ena_rx_ctx, fill_hash);
 
 		if (unlikely(mbuf->ol_flags &
-				(PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))) {
+				(RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD))) {
 			rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors);
 			++rx_ring->rx_stats.bad_csum;
 		}
@@ -2402,10 +2395,10 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		if (ol_flags == 0)
 			continue;
 
-		l4_csum_flag = ol_flags & PKT_TX_L4_MASK;
+		l4_csum_flag = ol_flags & RTE_MBUF_F_TX_L4_MASK;
 		/* SCTP checksum offload is not supported by the ENA. */
 		if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) ||
-		    l4_csum_flag == PKT_TX_SCTP_CKSUM) {
+		    l4_csum_flag == RTE_MBUF_F_TX_SCTP_CKSUM) {
 			PMD_TX_LOG(DEBUG,
 				"mbuf[%" PRIu32 "] has unsupported offloads flags set: 0x%" PRIu64 "\n",
 				i, ol_flags);
@@ -2413,14 +2406,25 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			return i;
 		}
 
+		if (unlikely(m->nb_segs >= tx_ring->sgl_size &&
+		    !(tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
+		      m->nb_segs == tx_ring->sgl_size &&
+		      m->data_len < tx_ring->tx_max_header_size))) {
+			PMD_TX_LOG(DEBUG,
+				"mbuf[%" PRIu32 "] has too many segments: %" PRIu16 "\n",
+				i, m->nb_segs);
+			rte_errno = EINVAL;
+			return i;
+		}
+
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
 		/* Check if requested offload is also enabled for the queue */
-		if ((ol_flags & PKT_TX_IP_CKSUM &&
-		     !(tx_ring->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) ||
-		    (l4_csum_flag == PKT_TX_TCP_CKSUM &&
-		     !(tx_ring->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) ||
-		    (l4_csum_flag == PKT_TX_UDP_CKSUM &&
-		     !(tx_ring->offloads & DEV_TX_OFFLOAD_UDP_CKSUM))) {
+		if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM &&
+		     !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) ||
+		    (l4_csum_flag == RTE_MBUF_F_TX_TCP_CKSUM &&
+		     !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) ||
+		    (l4_csum_flag == RTE_MBUF_F_TX_UDP_CKSUM &&
+		     !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM))) {
 			PMD_TX_LOG(DEBUG,
 				"mbuf[%" PRIu32 "]: requested offloads: %" PRIu16 " are not enabled for the queue[%u]\n",
 				i, m->nb_segs, tx_ring->id);
@@ -2431,7 +2435,7 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		/* The caller is obligated to set l2 and l3 len if any cksum
 		 * offload is enabled.
 		 */
-		if (unlikely(ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK) &&
+		if (unlikely(ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK) &&
 		    (m->l2_len == 0 || m->l3_len == 0))) {
 			PMD_TX_LOG(DEBUG,
 				"mbuf[%" PRIu32 "]: l2_len or l3_len values are 0 while the offload was requested\n",
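Aside (assumed application-side code, not part of the patch): the new
segment-count check rejects over-segmented mbufs in rte_eth_tx_prepare()
rather than linearizing them later in the datapath, so a sender can react
before the burst:

#include <rte_ethdev.h>
#include <rte_errno.h>
#include <rte_mbuf.h>

static uint16_t
send_burst(uint16_t port_id, uint16_t queue_id,
	   struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id,
					      pkts, nb_pkts);

	/* If nb_prep < nb_pkts, rte_errno holds the reason (EINVAL above
	 * for too many segments) and pkts[nb_prep] is the offending mbuf;
	 * only the checked prefix is handed to the PMD. */
	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}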
@@ -2450,14 +2454,14 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		 * pseudo header checksum is needed.
 		 */
 		need_pseudo_csum = false;
-		if (ol_flags & PKT_TX_IPV4) {
-			if (ol_flags & PKT_TX_IP_CKSUM &&
+		if (ol_flags & RTE_MBUF_F_TX_IPV4) {
+			if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM &&
 			    !(dev_offload_capa & ENA_L3_IPV4_CSUM)) {
 				rte_errno = ENOTSUP;
 				return i;
 			}
 
-			if (ol_flags & PKT_TX_TCP_SEG &&
+			if (ol_flags & RTE_MBUF_F_TX_TCP_SEG &&
 			    !(dev_offload_capa & ENA_IPV4_TSO)) {
 				rte_errno = ENOTSUP;
 				return i;
@@ -2466,7 +2470,7 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			/* Check HW capabilities and if pseudo csum is needed
 			 * for L4 offloads.
 			 */
-			if (l4_csum_flag != PKT_TX_L4_NO_CKSUM &&
+			if (l4_csum_flag != RTE_MBUF_F_TX_L4_NO_CKSUM &&
 			    !(dev_offload_capa & ENA_L4_IPV4_CSUM)) {
 				if (dev_offload_capa &
 				    ENA_L4_IPV4_CSUM_PARTIAL) {
@@ -2483,22 +2487,22 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
 			if (frag_field & RTE_IPV4_HDR_DF_FLAG) {
 				m->packet_type |= RTE_PTYPE_L4_NONFRAG;
-			} else if (ol_flags & PKT_TX_TCP_SEG) {
+			} else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
 				/* In case we are supposed to TSO and have DF
 				 * not set (DF=0) hardware must be provided with
 				 * partial checksum.
 				 */
 				need_pseudo_csum = true;
 			}
-		} else if (ol_flags & PKT_TX_IPV6) {
+		} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
 			/* There is no support for IPv6 TSO as for now. */
-			if (ol_flags & PKT_TX_TCP_SEG) {
+			if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
 				rte_errno = ENOTSUP;
 				return i;
 			}
 
 			/* Check HW capabilities and if pseudo csum is needed */
-			if (l4_csum_flag != PKT_TX_L4_NO_CKSUM &&
+			if (l4_csum_flag != RTE_MBUF_F_TX_L4_NO_CKSUM &&
 			    !(dev_offload_capa & ENA_L4_IPV6_CSUM)) {
 				if (dev_offload_capa &
 				    ENA_L4_IPV6_CSUM_PARTIAL) {
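Aside (a sketch under assumptions, not part of the patch): when only an
ENA_L4_*_CSUM_PARTIAL capability is present, the L4 checksum field must be
pre-seeded with the pseudo-header checksum. For IPv4/TCP that amounts to
the following, assuming the headers sit contiguously in the first segment
and l2_len/l3_len are set:

#include <rte_ip.h>
#include <rte_mbuf.h>
#include <rte_tcp.h>

static void
seed_ipv4_tcp_pseudo_csum(struct rte_mbuf *m)
{
	struct rte_ipv4_hdr *ip = rte_pktmbuf_mtod_offset(m,
			struct rte_ipv4_hdr *, m->l2_len);
	struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *)((char *)ip +
			m->l3_len);

	/* rte_ipv4_phdr_cksum() honours RTE_MBUF_F_TX_TCP_SEG in ol_flags. */
	tcp->cksum = rte_ipv4_phdr_cksum(ip, m->ol_flags);
}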
@@ -2559,56 +2563,6 @@ static void ena_update_hints(struct ena_adapter *adapter,
 	}
 }
 
-static int ena_check_space_and_linearize_mbuf(struct ena_ring *tx_ring,
-	struct rte_mbuf *mbuf)
-{
-	struct ena_com_dev *ena_dev;
-	int num_segments, header_len, rc;
-
-	ena_dev = &tx_ring->adapter->ena_dev;
-	num_segments = mbuf->nb_segs;
-	header_len = mbuf->data_len;
-
-	if (likely(num_segments < tx_ring->sgl_size))
-		goto checkspace;
-
-	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
-	    (num_segments == tx_ring->sgl_size) &&
-	    (header_len < tx_ring->tx_max_header_size))
-		goto checkspace;
-
-	/* Checking for space for 2 additional metadata descriptors due to
-	 * possible header split and metadata descriptor. Linearization will
-	 * be needed so we reduce the segments number from num_segments to 1
-	 */
-	if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 3)) {
-		PMD_TX_LOG(DEBUG, "Not enough space in the Tx queue\n");
-		return ENA_COM_NO_MEM;
-	}
-	++tx_ring->tx_stats.linearize;
-	rc = rte_pktmbuf_linearize(mbuf);
-	if (unlikely(rc)) {
-		PMD_TX_LOG(WARNING, "Mbuf linearize failed\n");
-		rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors);
-		++tx_ring->tx_stats.linearize_failed;
-		return rc;
-	}
-
-	return 0;
-
-checkspace:
-	/* Checking for space for 2 additional metadata descriptors due to
-	 * possible header split and metadata descriptor
-	 */
-	if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
-					  num_segments + 2)) {
-		PMD_TX_LOG(DEBUG, "Not enough space in the Tx queue\n");
-		return ENA_COM_NO_MEM;
-	}
-
-	return 0;
-}
-
 static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
 	struct ena_tx_buffer *tx_info,
 	struct rte_mbuf *mbuf,
@@ -2693,9 +2647,14 @@ static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf)
 	int nb_hw_desc;
 	int rc;
 
-	rc = ena_check_space_and_linearize_mbuf(tx_ring, mbuf);
-	if (unlikely(rc))
-		return rc;
+	/* Checking for space for 2 additional metadata descriptors due to
+	 * possible header split and metadata descriptor
+	 */
+	if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
+					  mbuf->nb_segs + 2)) {
+		PMD_DRV_LOG(DEBUG, "Not enough space in the tx queue\n");
+		return ENA_COM_NO_MEM;
+	}
 
 	next_to_use = tx_ring->next_to_use;
 
@@ -3113,7 +3072,7 @@ static int ena_parse_devargs(struct ena_adapter *adapter,
 static int ena_setup_rx_intr(struct rte_eth_dev *dev)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
 	int rc;
 	uint16_t vectors_nb, i;
 	bool rx_intr_requested = dev->data->dev_conf.intr_conf.rxq;
@@ -3140,9 +3099,9 @@ static int ena_setup_rx_intr(struct rte_eth_dev *dev)
 		goto enable_intr;
 	}
 
-	intr_handle->intr_vec = rte_zmalloc("intr_vec",
-		dev->data->nb_rx_queues * sizeof(*intr_handle->intr_vec), 0);
-	if (intr_handle->intr_vec == NULL) {
+	/* Allocate the vector list */
+	if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
+				    dev->data->nb_rx_queues)) {
 		PMD_DRV_LOG(ERR,
 			"Failed to allocate interrupt vector for %d queues\n",
 			dev->data->nb_rx_queues);
@@ -3161,7 +3120,9 @@ static int ena_setup_rx_intr(struct rte_eth_dev *dev)
 	}
 
 	for (i = 0; i < vectors_nb; ++i)
-		intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i;
+		if (rte_intr_vec_list_index_set(intr_handle, i,
+						RTE_INTR_VEC_RXTX_OFFSET + i))
+			goto disable_intr_efd;
 
 	rte_intr_enable(intr_handle);
 	return 0;
@@ -3169,8 +3130,7 @@ static int ena_setup_rx_intr(struct rte_eth_dev *dev)
 disable_intr_efd:
 	rte_intr_efd_disable(intr_handle);
 free_intr_vec:
-	rte_free(intr_handle->intr_vec);
-	intr_handle->intr_vec = NULL;
+	rte_intr_vec_list_free(intr_handle);
 enable_intr:
 	rte_intr_enable(intr_handle);
 	return rc;
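Closing aside (assumed application-side code, not part of the patch): with
the in-driver linearization removed, a sender whose mbuf chains can exceed
the device SGL size has to linearize on its own before transmitting:

#include <rte_mbuf.h>

/* nb_seg_max would come from rte_eth_dev_info_get()
 * (dev_info.tx_desc_lim.nb_seg_max). Returns 0 on success, negative if
 * the chain cannot be collapsed into the first segment. */
static int
linearize_if_needed(struct rte_mbuf *m, uint16_t nb_seg_max)
{
	if (m->nb_segs <= nb_seg_max)
		return 0;

	return rte_pktmbuf_linearize(m);
}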