From: Cunming Liang
Date: Wed, 4 Nov 2015 08:45:40 +0000 (+0800)
Subject: i40evf: support Rx interrupt
X-Git-Tag: spdx-start~8120
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=4b90a3ff26c5a2def84e06cd75c3792e5d313c75;p=dpdk.git

i40evf: support Rx interrupt

The patch enables Rx interrupt support on the i40e VF, together with the
changes needed on the PF in IOV mode to support a VF. On the PF side,
running in IOV mode via uio does not allow Rx interrupts: only a single
vector is available, so the Rx interrupt would compete with the mailbox
interrupt. On the VF side, one single vector is shared among all the Rx
queues.

Signed-off-by: Cunming Liang
---

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 1cca67786a..a39bd283fc 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -76,11 +76,6 @@
 /* Maximun number of VSI */
 #define I40E_MAX_NUM_VSIS          (384UL)
 
-/* Default queue interrupt throttling time in microseconds */
-#define I40E_ITR_INDEX_DEFAULT          0
-#define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
-#define I40E_QUEUE_ITR_INTERVAL_MAX     8160 /* 8160 us */
-
 #define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */
 
 /* Flow control default timer */
@@ -1099,16 +1094,6 @@ i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
         I40E_WRITE_FLUSH(hw);
 }
 
-static inline uint16_t
-i40e_calc_itr_interval(int16_t interval)
-{
-        if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
-                interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
-
-        /* Convert to hardware count, as writing each 1 represents 2 us */
-        return (interval/2);
-}
-
 static void
 __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
                        int base_queue, int nb_queue)
@@ -1159,13 +1144,24 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
         } else {
                 uint32_t reg;
 
-                /* num_msix_vectors_vf needs to minus irq0 */
-                reg = (hw->func_caps.num_msix_vectors_vf - 1) *
-                        vsi->user_param + (msix_vect - 1);
+                if (msix_vect == I40E_MISC_VEC_ID) {
+                        I40E_WRITE_REG(hw,
+                                I40E_VPINT_LNKLST0(vsi->user_param),
+                                (base_queue <<
+                                 I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
+                                (0x0 <<
+                                 I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
+                } else {
+                        /* num_msix_vectors_vf needs to minus irq0 */
+                        reg = (hw->func_caps.num_msix_vectors_vf - 1) *
+                                vsi->user_param + (msix_vect - 1);
 
-                I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), (base_queue <<
-                        I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
-                        (0x0 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
+                        I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
+                                (base_queue <<
+                                 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
+                                (0x0 <<
+                                 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
+                }
         }
 
         I40E_WRITE_FLUSH(hw);
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index eff3adbbfe..d28193548c 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -162,6 +162,11 @@ enum i40e_flxpld_layer_idx {
 #define I40E_MISC_VEC_ID                RTE_INTR_VEC_ZERO_OFFSET
 #define I40E_RX_VEC_START               RTE_INTR_VEC_RXTX_OFFSET
 
+/* Default queue interrupt throttling time in microseconds */
+#define I40E_ITR_INDEX_DEFAULT          0
+#define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
+#define I40E_QUEUE_ITR_INTERVAL_MAX     8160 /* 8160 us */
+
 struct i40e_adapter;
 
 /**
@@ -638,6 +643,16 @@ i40e_align_floor(int n)
         return 1 << (sizeof(n) * CHAR_BIT - 1 - __builtin_clz(n));
 }
 
+static inline uint16_t
+i40e_calc_itr_interval(int16_t interval)
+{
+        if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
+                interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
+
+        /* Convert to hardware count, as writing each 1 represents 2 us */
+        return (interval / 2);
+}
+
 #define I40E_VALID_FLOW(flow_type) \
         ((flow_type) == RTE_ETH_FLOW_FRAG_IPV4 || \
          (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_TCP || \
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 18ec46e949..615da8dff2 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -150,6 +150,10 @@ static int i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
                                       struct rte_eth_rss_conf *rss_conf);
 static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                                         struct rte_eth_rss_conf *rss_conf);
+static int
+i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
+static int
+i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
 
 /* Default hash key buffer for RSS */
 static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1];
@@ -201,6 +205,9 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
         .tx_queue_stop = i40evf_dev_tx_queue_stop,
         .rx_queue_setup = i40e_dev_rx_queue_setup,
         .rx_queue_release = i40e_dev_rx_queue_release,
+        .rx_queue_intr_enable = i40evf_dev_rx_queue_intr_enable,
+        .rx_queue_intr_disable = i40evf_dev_rx_queue_intr_disable,
+        .rx_descriptor_done = i40e_dev_rx_descriptor_done,
         .tx_queue_setup = i40e_dev_tx_queue_setup,
         .tx_queue_release = i40e_dev_tx_queue_release,
         .reta_update = i40evf_dev_rss_reta_update,
@@ -741,22 +748,33 @@ i40evf_config_irq_map(struct rte_eth_dev *dev)
         uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_irq_map_info) + \
                 sizeof(struct i40e_virtchnl_vector_map)];
         struct i40e_virtchnl_irq_map_info *map_info;
+        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+        uint32_t vector_id;
         int i, err;
+
+        if (rte_intr_allow_others(intr_handle)) {
+                if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
+                        vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR;
+                else
+                        vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR_LNX;
+        } else {
+                vector_id = I40E_MISC_VEC_ID;
+        }
+
         map_info = (struct i40e_virtchnl_irq_map_info *)cmd_buffer;
         map_info->num_vectors = 1;
         map_info->vecmap[0].rxitr_idx = I40E_QINT_RQCTL_MSIX_INDX_NOITR;
         map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
         /* Alway use default dynamic MSIX interrupt */
-        if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
-                map_info->vecmap[0].vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR;
-        else
-                map_info->vecmap[0].vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR_LNX;
-
+        map_info->vecmap[0].vector_id = vector_id;
         /* Don't map any tx queue */
         map_info->vecmap[0].txq_map = 0;
         map_info->vecmap[0].rxq_map = 0;
-        for (i = 0; i < dev->data->nb_rx_queues; i++)
+        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                 map_info->vecmap[0].rxq_map |= 1 << i;
+                if (rte_intr_dp_is_en(intr_handle))
+                        intr_handle->intr_vec[i] = vector_id;
+        }
 
         args.ops = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
         args.in_args = (u8 *)cmd_buffer;
@@ -1669,6 +1687,16 @@ i40evf_enable_queues_intr(struct rte_eth_dev *dev)
 {
         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+
+        if (!rte_intr_allow_others(intr_handle)) {
+                I40E_WRITE_REG(hw,
+                        I40E_VFINT_DYN_CTL01,
+                        I40E_VFINT_DYN_CTL01_INTENA_MASK |
+                        I40E_VFINT_DYN_CTL01_CLEARPBA_MASK);
+                I40E_WRITE_FLUSH(hw);
+                return;
+        }
 
         if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
                 /* To support DPDK PF host */
@@ -1681,6 +1709,8 @@ i40evf_enable_queues_intr(struct rte_eth_dev *dev)
                 I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
                         I40E_VFINT_DYN_CTL01_INTENA_MASK |
                         I40E_VFINT_DYN_CTL01_CLEARPBA_MASK);
+
+        I40E_WRITE_FLUSH(hw);
 }
 
 static inline void
@@ -1688,13 +1718,78 @@ i40evf_disable_queues_intr(struct rte_eth_dev *dev)
 {
         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+
+        if (!rte_intr_allow_others(intr_handle)) {
+                I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, 0);
+                I40E_WRITE_FLUSH(hw);
+                return;
+        }
 
         if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
                 I40E_WRITE_REG(hw,
-                        I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - 1),
-                        0);
+                        I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR
+                                             - 1),
+                        0);
+        else
+                I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, 0);
+
+        I40E_WRITE_FLUSH(hw);
+}
+
+static int
+i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+        uint16_t interval =
+                i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
+        uint16_t msix_intr;
+
+        msix_intr = intr_handle->intr_vec[queue_id];
+        if (msix_intr == I40E_MISC_VEC_ID)
+                I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
+                        I40E_VFINT_DYN_CTL01_INTENA_MASK |
+                        I40E_VFINT_DYN_CTL01_CLEARPBA_MASK |
+                        (0 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
+                        (interval <<
+                         I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT));
         else
+                I40E_WRITE_REG(hw,
+                        I40E_VFINT_DYN_CTLN1(msix_intr -
+                                             I40E_RX_VEC_START),
+                        I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+                        I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+                        (0 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
+                        (interval <<
+                         I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT));
+
+        I40E_WRITE_FLUSH(hw);
+
+        rte_intr_enable(&dev->pci_dev->intr_handle);
+
+        return 0;
+}
+
+static int
+i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+        uint16_t msix_intr;
+
+        msix_intr = intr_handle->intr_vec[queue_id];
+        if (msix_intr == I40E_MISC_VEC_ID)
                 I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, 0);
+        else
+                I40E_WRITE_REG(hw,
+                        I40E_VFINT_DYN_CTLN1(msix_intr -
+                                             I40E_RX_VEC_START),
+                        0);
+
+        I40E_WRITE_FLUSH(hw);
+
+        return 0;
 }
 
 static int
@@ -1702,7 +1797,9 @@ i40evf_dev_start(struct rte_eth_dev *dev)
 {
         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
         struct ether_addr mac_addr;
+        uint32_t intr_vector = 0;
 
         PMD_INIT_FUNC_TRACE();
 
@@ -1712,6 +1809,24 @@ i40evf_dev_start(struct rte_eth_dev *dev)
         vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
                                       dev->data->nb_tx_queues);
 
+        /* check and configure queue intr-vector mapping */
+        if (dev->data->dev_conf.intr_conf.rxq != 0) {
+                intr_vector = dev->data->nb_rx_queues;
+                if (rte_intr_efd_enable(intr_handle, intr_vector))
+                        return -1;
+        }
+
+        if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+                intr_handle->intr_vec =
+                        rte_zmalloc("intr_vec",
+                                    dev->data->nb_rx_queues * sizeof(int), 0);
+                if (!intr_handle->intr_vec) {
+                        PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+                                     " intr_vec\n", dev->data->nb_rx_queues);
+                        return -ENOMEM;
+                }
+        }
+
         if (i40evf_rx_init(dev) != 0){
                 PMD_DRV_LOG(ERR, "failed to do RX init");
                 return -1;
@@ -1741,6 +1856,10 @@ i40evf_dev_start(struct rte_eth_dev *dev)
                 goto err_mac;
         }
 
+        /* vf don't allow intr except for rxq intr */
+        if (dev->data->dev_conf.intr_conf.rxq != 0)
+                rte_intr_enable(intr_handle);
+
         i40evf_enable_queues_intr(dev);
         return 0;
 
@@ -1753,11 +1872,20 @@ err_queue:
 static void
 i40evf_dev_stop(struct rte_eth_dev *dev)
 {
+        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+
         PMD_INIT_FUNC_TRACE();
 
-        i40evf_disable_queues_intr(dev);
         i40evf_stop_queues(dev);
+        i40evf_disable_queues_intr(dev);
         i40e_dev_clear_queues(dev);
+
+        /* Clean datapath event and queue/vec mapping */
+        rte_intr_efd_disable(intr_handle);
+        if (intr_handle->intr_vec) {
+                rte_free(intr_handle->intr_vec);
+                intr_handle->intr_vec = NULL;
+        }
 }
 
 static int
diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c
index c1d58a8410..cbf4e5bbc9 100644
--- a/drivers/net/i40e/i40e_pf.c
+++ b/drivers/net/i40e/i40e_pf.c
@@ -547,11 +547,6 @@ i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
                 goto send_msg;
         }
 
-        if (irqmap->vecmap[0].vector_id == 0) {
-                PMD_DRV_LOG(ERR, "DPDK host don't support use IRQ0");
-                ret = I40E_ERR_PARAM;
-                goto send_msg;
-        }
         /* This MSIX intr store the intr in VF range */
         vf->vsi->msix_intr = irqmap->vecmap[0].vector_id;
         vf->vsi->nb_msix = irqmap->num_vectors;
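---

Below is a minimal sketch of how an application would consume this feature
through the generic ethdev Rx interrupt API, in the style of
examples/l3fwd-power from the same DPDK generation. It is an illustration,
not part of the patch: PORT_ID, QUEUE_ID and BURST are placeholder values,
error handling is omitted, and it assumes the application set
dev_conf.intr_conf.rxq = 1 before rte_eth_dev_configure(), so that
i40evf_dev_start() above performs the event-fd setup and queue/vector
mapping.

/*
 * Interrupt-driven Rx loop for one VF queue (hypothetical sketch):
 * poll while traffic flows, sleep on the Rx interrupt when idle.
 */
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>

#define PORT_ID  0      /* placeholder port */
#define QUEUE_ID 0      /* placeholder queue */
#define BURST    32

static void
rx_intr_loop(void)
{
        struct rte_mbuf *pkts[BURST];
        struct rte_epoll_event ev;
        uint16_t i, nb_rx;

        /* Register the queue's Rx event with this thread's epoll instance. */
        rte_eth_dev_rx_intr_ctl_q(PORT_ID, QUEUE_ID, RTE_EPOLL_PER_THREAD,
                                  RTE_INTR_EVENT_ADD, NULL);

        for (;;) {
                /* Busy-poll until the queue runs dry. */
                do {
                        nb_rx = rte_eth_rx_burst(PORT_ID, QUEUE_ID,
                                                 pkts, BURST);
                        for (i = 0; i < nb_rx; i++)
                                rte_pktmbuf_free(pkts[i]); /* real work here */
                } while (nb_rx > 0);

                /* Arm the interrupt, block until traffic resumes, disarm. */
                rte_eth_dev_rx_intr_enable(PORT_ID, QUEUE_ID);
                rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
                rte_eth_dev_rx_intr_disable(PORT_ID, QUEUE_ID);
        }
}

Since the VF shares one MSI-X vector among all its Rx queues (as the commit
message notes), a wakeup from rte_epoll_wait() only indicates that some
mapped queue has work; an application with several queues should re-poll
all of them after waking.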