X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fi40e%2Fi40e_rxtx.c;h=40cffc103d9765f832a919c737b8a5889e4f7732;hb=693f715da45c48ec1ec0fe4ba2f3b5ffd11ba53e;hp=d37cbde2fcca6537093f3294edbcdcd5a01726c9;hpb=0e0da28cd888b06b38d316cc0df279eb36dcf693;p=dpdk.git

diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index d37cbde2fc..40cffc103d 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -57,9 +57,6 @@
 #include "i40e_ethdev.h"
 #include "i40e_rxtx.h"
 
-#define I40E_MIN_RING_DESC 64
-#define I40E_MAX_RING_DESC 4096
-#define I40E_ALIGN 128
 #define DEFAULT_TX_RS_THRESH 32
 #define DEFAULT_TX_FREE_THRESH 32
 #define I40E_MAX_PKT_TYPE 256
@@ -68,6 +65,9 @@
 
 #define I40E_DMA_MEM_ALIGN 4096
 
+/* Base address of the HW descriptor ring should be 128B aligned. */
+#define I40E_RING_BASE_ALIGN 128
+
 #define I40E_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
 					ETH_TXQ_FLAGS_NOOFFLOADS)
 
@@ -84,12 +84,6 @@
 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
 	((uint64_t)((mb)->buf_physaddr + (mb)->data_off))
 
-static const struct rte_memzone *
-i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
-			   const char *ring_name,
-			   uint16_t queue_id,
-			   uint32_t ring_size,
-			   int socket_id);
 static uint16_t i40e_xmit_pkts_simple(void *tx_queue,
				       struct rte_mbuf **tx_pkts,
				       uint16_t nb_pkts);
@@ -1489,7 +1483,7 @@ i40e_calc_context_desc(uint64_t flags)
	mask |= PKT_TX_IEEE1588_TMST;
 #endif
 
-	return ((flags & mask) ? 1 : 0);
+	return (flags & mask) ? 1 : 0;
 }
 
 /* set i40e TSO context descriptor */
@@ -2006,7 +2000,8 @@ i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
			i40e_rx_queue_release_mbufs(rxq);
			i40e_reset_rx_queue(rxq);
-		}
+		} else
+			dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	}
 
	return err;
@@ -2035,6 +2030,7 @@ i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
		}
		i40e_rx_queue_release_mbufs(rxq);
		i40e_reset_rx_queue(rxq);
+		dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	}
 
	return 0;
@@ -2060,6 +2056,8 @@ i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
		if (err)
			PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
				    tx_queue_id);
+		else
+			dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	}
 
	return err;
@@ -2089,6 +2087,7 @@ i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 
		i40e_tx_queue_release_mbufs(txq);
		i40e_reset_tx_queue(txq);
+		dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	}
 
	return 0;
@@ -2105,10 +2104,13 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
	struct i40e_vsi *vsi;
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct i40e_rx_queue *rxq;
	const struct rte_memzone *rz;
	uint32_t ring_size;
-	uint16_t len;
+	uint16_t len, i;
+	uint16_t base, bsf, tc_mapping;
	int use_def_burst_func = 1;
 
	if (hw->mac.type == I40E_MAC_VF) {
@@ -2123,9 +2125,9 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
			    "index exceeds the maximum");
		return I40E_ERR_PARAM;
	}
-	if (((nb_desc * sizeof(union i40e_rx_desc)) % I40E_ALIGN) != 0 ||
-	    (nb_desc > I40E_MAX_RING_DESC) ||
-	    (nb_desc < I40E_MIN_RING_DESC)) {
+	if (nb_desc % I40E_ALIGN_RING_DESC != 0 ||
+	    (nb_desc > I40E_MAX_RING_DESC) ||
+	    (nb_desc < I40E_MIN_RING_DESC)) {
		PMD_DRV_LOG(ERR, "Number (%u) of receive descriptors is "
			    "invalid", nb_desc);
		return I40E_ERR_PARAM;
@@ -2145,7 +2147,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
	if (!rxq) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory for "
			    "rx queue data structure");
-		return (-ENOMEM);
+		return -ENOMEM;
	}
	rxq->mp = mp;
	rxq->nb_rx_desc = nb_desc;
@@ -2167,26 +2169,18 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
	/* Allocate the maximun number of RX ring hardware descriptor. */
	ring_size = sizeof(union i40e_rx_desc) * I40E_MAX_RING_DESC;
	ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
-	rz = i40e_ring_dma_zone_reserve(dev,
-					"rx_ring",
-					queue_idx,
-					ring_size,
-					socket_id);
+	rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
+			      ring_size, I40E_RING_BASE_ALIGN, socket_id);
	if (!rz) {
		i40e_dev_rx_queue_release(rxq);
		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX");
-		return (-ENOMEM);
+		return -ENOMEM;
	}
 
	/* Zero all the descriptors in the ring. */
	memset(rz->addr, 0, ring_size);
 
-#ifdef RTE_LIBRTE_XEN_DOM0
	rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
-#else
-	rxq->rx_ring_phys_addr = (uint64_t)rz->phys_addr;
-#endif
-
	rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
 
 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
@@ -2204,7 +2198,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
	if (!rxq->sw_ring) {
		i40e_dev_rx_queue_release(rxq);
		PMD_DRV_LOG(ERR, "Failed to allocate memory for SW ring");
-		return (-ENOMEM);
+		return -ENOMEM;
	}
 
	i40e_reset_rx_queue(rxq);
@@ -2213,13 +2207,12 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
	use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
 
-	if (!use_def_burst_func && !dev->data->scattered_rx) {
+	if (!use_def_burst_func) {
 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
			     "satisfied. Rx Burst Bulk Alloc function will be "
			     "used on port=%d, queue=%d.",
			     rxq->port_id, rxq->queue_id);
-		dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
 #endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
	} else {
		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -2227,6 +2220,20 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
			     "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
			     "not enabled on port=%d, queue=%d.",
			     rxq->port_id, rxq->queue_id);
+		ad->rx_bulk_alloc_allowed = false;
+	}
+
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		if (!(vsi->enabled_tc & (1 << i)))
+			continue;
+		tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
+		base = (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
+			I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
+		bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
+			I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
+
+		if (queue_idx >= base && queue_idx < (base + BIT(bsf)))
+			rxq->dcb_tc = i;
	}
 
	return 0;
@@ -2321,6 +2328,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
	const struct rte_memzone *tz;
	uint32_t ring_size;
	uint16_t tx_rs_thresh, tx_free_thresh;
+	uint16_t i, base, bsf, tc_mapping;
 
	if (hw->mac.type == I40E_MAC_VF) {
		struct i40e_vf *vf =
@@ -2335,9 +2343,9 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
		return I40E_ERR_PARAM;
	}
 
-	if (((nb_desc * sizeof(struct i40e_tx_desc)) % I40E_ALIGN) != 0 ||
-	    (nb_desc > I40E_MAX_RING_DESC) ||
-	    (nb_desc < I40E_MIN_RING_DESC)) {
+	if (nb_desc % I40E_ALIGN_RING_DESC != 0 ||
+	    (nb_desc > I40E_MAX_RING_DESC) ||
+	    (nb_desc < I40E_MIN_RING_DESC)) {
		PMD_DRV_LOG(ERR, "Number (%u) of transmit descriptors is "
			    "invalid", nb_desc);
		return I40E_ERR_PARAM;
@@ -2429,21 +2437,18 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
	if (!txq) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory for "
			    "tx queue structure");
-		return (-ENOMEM);
+		return -ENOMEM;
	}
 
	/* Allocate TX hardware ring descriptors. */
	ring_size = sizeof(struct i40e_tx_desc) * I40E_MAX_RING_DESC;
	ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
-	tz = i40e_ring_dma_zone_reserve(dev,
-					"tx_ring",
-					queue_idx,
-					ring_size,
-					socket_id);
+	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+			      ring_size, I40E_RING_BASE_ALIGN, socket_id);
	if (!tz) {
		i40e_dev_tx_queue_release(txq);
		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX");
-		return (-ENOMEM);
+		return -ENOMEM;
	}
 
	txq->nb_tx_desc = nb_desc;
@@ -2464,11 +2469,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
	txq->vsi = vsi;
	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
-#ifdef RTE_LIBRTE_XEN_DOM0
	txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
-#else
-	txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
-#endif
	txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
 
	/* Allocate software ring */
@@ -2480,7 +2481,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
	if (!txq->sw_ring) {
		i40e_dev_tx_queue_release(txq);
		PMD_DRV_LOG(ERR, "Failed to allocate memory for SW TX ring");
-		return (-ENOMEM);
+		return -ENOMEM;
	}
 
	i40e_reset_tx_queue(txq);
@@ -2488,13 +2489,19 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
	dev->data->tx_queues[queue_idx] = txq;
 
	/* Use a simple TX queue without offloads or multi segs if possible */
-	if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS) &&
-				(txq->tx_rs_thresh >= I40E_TX_MAX_BURST)) {
-		PMD_INIT_LOG(INFO, "Using simple tx path");
-		dev->tx_pkt_burst = i40e_xmit_pkts_simple;
-	} else {
-		PMD_INIT_LOG(INFO, "Using full-featured tx path");
-		dev->tx_pkt_burst = i40e_xmit_pkts;
+	i40e_set_tx_function_flag(dev, txq);
+
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		if (!(vsi->enabled_tc & (1 << i)))
+			continue;
+		tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
+		base = (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
+			I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
+		bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
+			I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
+
+		if (queue_idx >= base && queue_idx < (base + BIT(bsf)))
+			txq->dcb_tc = i;
	}
 
	return 0;
@@ -2515,47 +2522,21 @@ i40e_dev_tx_queue_release(void *txq)
 
	rte_free(q);
 }
 
-static const struct rte_memzone *
-i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
-			   const char *ring_name,
-			   uint16_t queue_id,
-			   uint32_t ring_size,
-			   int socket_id)
-{
-	char z_name[RTE_MEMZONE_NAMESIZE];
-	const struct rte_memzone *mz;
-
-	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-			dev->driver->pci_drv.name, ring_name,
-				dev->data->port_id, queue_id);
-	mz = rte_memzone_lookup(z_name);
-	if (mz)
-		return mz;
-
-#ifdef RTE_LIBRTE_XEN_DOM0
-	return rte_memzone_reserve_bounded(z_name, ring_size,
-		socket_id, 0, I40E_ALIGN, RTE_PGSIZE_2M);
-#else
-	return rte_memzone_reserve_aligned(z_name, ring_size,
-				socket_id, 0, I40E_ALIGN);
-#endif
-}
-
 const struct rte_memzone *
 i40e_memzone_reserve(const char *name, uint32_t len, int socket_id)
 {
-	const struct rte_memzone *mz = NULL;
+	const struct rte_memzone *mz;
	mz = rte_memzone_lookup(name);
	if (mz)
		return mz;
-#ifdef RTE_LIBRTE_XEN_DOM0
-	mz = rte_memzone_reserve_bounded(name, len,
-		socket_id, 0, I40E_ALIGN, RTE_PGSIZE_2M);
-#else
-	mz = rte_memzone_reserve_aligned(name, len,
-				socket_id, 0, I40E_ALIGN);
-#endif
+
+	if (rte_xen_dom0_supported())
+		mz = rte_memzone_reserve_bounded(name, len,
+			socket_id, 0, I40E_RING_BASE_ALIGN, RTE_PGSIZE_2M);
+	else
+		mz = rte_memzone_reserve_aligned(name, len,
+			socket_id, 0, I40E_RING_BASE_ALIGN);
+
	return mz;
 }
 
@@ -2564,6 +2545,12 @@ i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq)
 {
	uint16_t i;
 
+	/* SSE Vector driver has a different way of releasing mbufs. */
+	if (rxq->rx_using_sse) {
+		i40e_rx_queue_release_mbufs_vec(rxq);
+		return;
+	}
+
	if (!rxq || !rxq->sw_ring) {
		PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
		return;
@@ -2703,7 +2690,7 @@ i40e_tx_queue_init(struct i40e_tx_queue *txq)
 #ifdef RTE_LIBRTE_IEEE1588
	tx_ctx.timesync_ena = 1;
 #endif
-	tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[0]);
+	tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[txq->dcb_tc]);
	if (vsi->type == I40E_VSI_FDIR)
		tx_ctx.fd_ena = TRUE;
 
@@ -2837,7 +2824,6 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
	int err = I40E_SUCCESS;
	struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
	struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(rxq->vsi);
-	struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
	uint16_t pf_q = rxq->reg_idx;
	uint16_t buf_size;
	struct i40e_hmc_obj_rxq rx_ctx;
@@ -2893,7 +2879,6 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
	/* Check if scattered RX needs to be used. */
	if ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
		dev_data->scattered_rx = 1;
-		dev->rx_pkt_burst = i40e_recv_scattered_pkts;
	}
 
	/* Init the RX tail regieter. */
@@ -2971,11 +2956,9 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
	ring_size = sizeof(struct i40e_tx_desc) * I40E_FDIR_NUM_TX_DESC;
	ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
 
-	tz = i40e_ring_dma_zone_reserve(dev,
-					"fdir_tx_ring",
-					I40E_FDIR_QUEUE_ID,
-					ring_size,
-					SOCKET_ID_ANY);
+	tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
+				      I40E_FDIR_QUEUE_ID, ring_size,
+				      I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);
	if (!tz) {
		i40e_dev_tx_queue_release(txq);
		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
@@ -2987,11 +2970,7 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
	txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
	txq->vsi = pf->fdir.fdir_vsi;
 
-#ifdef RTE_LIBRTE_XEN_DOM0
	txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
-#else
-	txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
-#endif
	txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
	/*
	 * don't need to allocate software ring and reset for the fdir
@@ -3031,11 +3010,9 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
	ring_size = sizeof(union i40e_rx_desc) * I40E_FDIR_NUM_RX_DESC;
	ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
 
-	rz = i40e_ring_dma_zone_reserve(dev,
-					"fdir_rx_ring",
-					I40E_FDIR_QUEUE_ID,
-					ring_size,
-					SOCKET_ID_ANY);
+	rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
+				      I40E_FDIR_QUEUE_ID, ring_size,
+				      I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);
	if (!rz) {
		i40e_dev_rx_queue_release(rxq);
		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
@@ -3047,11 +3024,7 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
	rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
	rxq->vsi = pf->fdir.fdir_vsi;
 
-#ifdef RTE_LIBRTE_XEN_DOM0
	rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
-#else
-	rxq->rx_ring_phys_addr = (uint64_t)rz->phys_addr;
-#endif
	rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
 
	/*
@@ -3064,7 +3037,196 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
	return I40E_SUCCESS;
 }
 
+void
+i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_info *qinfo)
+{
+	struct i40e_rx_queue *rxq;
+
+	rxq = dev->data->rx_queues[queue_id];
+
+	qinfo->mp = rxq->mp;
+	qinfo->scattered_rx = dev->data->scattered_rx;
+	qinfo->nb_desc = rxq->nb_rx_desc;
+
+	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+	qinfo->conf.rx_drop_en = rxq->drop_en;
+	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_txq_info *qinfo)
+{
+	struct i40e_tx_queue *txq;
+
+	txq = dev->data->tx_queues[queue_id];
+
+	qinfo->nb_desc = txq->nb_tx_desc;
+
+	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+	qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+	qinfo->conf.txq_flags = txq->txq_flags;
+	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
+void __attribute__((cold))
+i40e_set_rx_function(struct rte_eth_dev *dev)
+{
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	uint16_t rx_using_sse, i;
+	/* In order to allow Vector Rx there are a few configuration
+	 * conditions to be met and Rx Bulk Allocation should be allowed.
+	 */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		if (i40e_rx_vec_dev_conf_condition_check(dev) ||
+		    !ad->rx_bulk_alloc_allowed) {
+			PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet"
+				     " Vector Rx preconditions",
+				     dev->data->port_id);
+
+			ad->rx_vec_allowed = false;
+		}
+		if (ad->rx_vec_allowed) {
+			for (i = 0; i < dev->data->nb_rx_queues; i++) {
+				struct i40e_rx_queue *rxq =
+					dev->data->rx_queues[i];
+
+				if (i40e_rxq_vec_setup(rxq)) {
+					ad->rx_vec_allowed = false;
+					break;
+				}
+			}
+		}
+	}
+
+	if (dev->data->scattered_rx) {
+		/* Set the non-LRO scattered callback: there are Vector and
+		 * single allocation versions.
+		 */
+		if (ad->rx_vec_allowed) {
+			PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
+					    "callback (port=%d).",
+				     dev->data->port_id);
+
+			dev->rx_pkt_burst = i40e_recv_scattered_pkts_vec;
+		} else {
+			PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
+					   "allocation callback (port=%d).",
+				     dev->data->port_id);
+			dev->rx_pkt_burst = i40e_recv_scattered_pkts;
+		}
+	/* If parameters allow we are going to choose between the following
+	 * callbacks:
+	 *    - Vector
+	 *    - Bulk Allocation
+	 *    - Single buffer allocation (the simplest one)
+	 */
+	} else if (ad->rx_vec_allowed) {
+		PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
+				    "burst size no less than %d (port=%d).",
+			     RTE_I40E_DESCS_PER_LOOP,
+			     dev->data->port_id);
+
+		dev->rx_pkt_burst = i40e_recv_pkts_vec;
+	} else if (ad->rx_bulk_alloc_allowed) {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+				    "satisfied. Rx Burst Bulk Alloc function "
+				    "will be used on port=%d.",
+			     dev->data->port_id);
+
+		dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
+	} else {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
+				    "satisfied, or Scattered Rx is requested "
+				    "(port=%d).",
+			     dev->data->port_id);
+
+		dev->rx_pkt_burst = i40e_recv_pkts;
+	}
+
+	/* Propagate information about RX function choice through all queues.
+	 */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		rx_using_sse =
+			(dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
+			 dev->rx_pkt_burst == i40e_recv_pkts_vec);
+
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			struct i40e_rx_queue *rxq = dev->data->rx_queues[i];
+
+			rxq->rx_using_sse = rx_using_sse;
+		}
+	}
+}
+
+void __attribute__((cold))
+i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
+{
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
+	if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS)
+			&& (txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST)) {
+		if (txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ) {
+			PMD_INIT_LOG(DEBUG, "Vector tx"
+				     " can be enabled on this txq.");
+
+		} else {
+			ad->tx_vec_allowed = false;
+		}
+	} else {
+		ad->tx_simple_allowed = false;
+	}
+}
+
+void __attribute__((cold))
+i40e_set_tx_function(struct rte_eth_dev *dev)
+{
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	int i;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		if (ad->tx_vec_allowed) {
+			for (i = 0; i < dev->data->nb_tx_queues; i++) {
+				struct i40e_tx_queue *txq =
+					dev->data->tx_queues[i];
+
+				if (i40e_txq_vec_setup(txq)) {
+					ad->tx_vec_allowed = false;
+					break;
+				}
+			}
+		}
+	}
+
+	if (ad->tx_simple_allowed) {
+		if (ad->tx_vec_allowed) {
+			PMD_INIT_LOG(DEBUG, "Vector tx finally be used.");
+			dev->tx_pkt_burst = i40e_xmit_pkts_vec;
+		} else {
+			PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
+			dev->tx_pkt_burst = i40e_xmit_pkts_simple;
+		}
+	} else {
+		PMD_INIT_LOG(DEBUG, "Xmit tx finally be used.");
+		dev->tx_pkt_burst = i40e_xmit_pkts;
+	}
+}
+
 /* Stubs needed for linkage when CONFIG_RTE_I40E_INC_VECTOR is set to 'n' */
+int __attribute__((weak))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
+{
+	return -1;
+}
+
 uint16_t __attribute__((weak))
 i40e_recv_pkts_vec(
	void __rte_unused *rx_queue,
@@ -3089,6 +3251,12 @@ i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
	return -1;
 }
 
+int __attribute__((weak))
+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
+{
+	return -1;
+}
+
 void __attribute__((weak))
 i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused*rxq)
 {