X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fenic%2Fenic_main.c;h=57574a5f7ea7d2129838ef86ad6cbebe91132089;hb=aa2cbd32e9695c0f552ee26653b7867cade5ffb3;hp=e507df1f6f1f850d52265354bb0f7bc80de72cf7;hpb=9970a9ad07db7745ca6bc441819b287940ae86ea;p=dpdk.git

diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index e507df1f6f..57574a5f7e 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -357,7 +357,7 @@ enic_initial_post_rx(struct enic *enic, struct vnic_rq *rq)
 	rq->need_initial_post = false;
 }
 
-static void *
+void *
 enic_alloc_consistent(void *priv, size_t size,
 	dma_addr_t *dma_handle, u8 *name)
 {
@@ -397,7 +397,7 @@ enic_alloc_consistent(void *priv, size_t size,
 	return vaddr;
 }
 
-static void
+void
 enic_free_consistent(void *priv,
 		     __rte_unused size_t size,
 		     void *vaddr,
@@ -424,9 +424,9 @@ enic_free_consistent(void *priv,
 	rte_free(mze);
 }
 
-int enic_link_update(struct enic *enic)
+int enic_link_update(struct rte_eth_dev *eth_dev)
 {
-	struct rte_eth_dev *eth_dev = enic->rte_dev;
+	struct enic *enic = pmd_priv(eth_dev);
 	struct rte_eth_link link;
 
 	memset(&link, 0, sizeof(link));
@@ -445,9 +445,11 @@ enic_intr_handler(void *arg)
 
 	vnic_intr_return_all_credits(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);
 
-	enic_link_update(enic);
+	enic_link_update(dev);
 	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 	enic_log_q_error(enic);
+	/* Re-enable irq in case of INTx */
+	rte_intr_ack(&enic->pdev->intr_handle);
 }
 
 static int enic_rxq_intr_init(struct enic *enic)
@@ -525,14 +527,14 @@ static void enic_prep_wq_for_simple_tx(struct enic *enic, uint16_t queue_idx)
  * used when that file is not compiled.
  */
 __rte_weak bool
-enic_use_vector_rx_handler(__rte_unused struct enic *enic)
+enic_use_vector_rx_handler(__rte_unused struct rte_eth_dev *eth_dev)
 {
 	return false;
 }
 
-static void pick_rx_handler(struct enic *enic)
+void enic_pick_rx_handler(struct rte_eth_dev *eth_dev)
 {
-	struct rte_eth_dev *eth_dev;
+	struct enic *enic = pmd_priv(eth_dev);
 
 	/*
 	 * Preference order:
@@ -540,8 +542,7 @@ static void pick_rx_handler(struct enic *enic)
 	 * 2. The non-scatter, simplified handler if scatter Rx is not used.
 	 * 3. The default handler as a fallback.
 	 */
-	eth_dev = enic->rte_dev;
-	if (enic_use_vector_rx_handler(enic))
+	if (enic_use_vector_rx_handler(eth_dev))
 		return;
 	if (enic->rq_count > 0 && enic->rq[0].data_queue_enable == 0) {
 		ENICPMD_LOG(DEBUG, " use the non-scatter Rx handler");
@@ -552,6 +553,20 @@ static void pick_rx_handler(struct enic *enic)
 	}
 }
 
+/* Secondary process uses this to set the Tx handler */
+void enic_pick_tx_handler(struct rte_eth_dev *eth_dev)
+{
+	struct enic *enic = pmd_priv(eth_dev);
+
+	if (enic->use_simple_tx_handler) {
+		ENICPMD_LOG(DEBUG, " use the simple tx handler");
+		eth_dev->tx_pkt_burst = &enic_simple_xmit_pkts;
+	} else {
+		ENICPMD_LOG(DEBUG, " use the default tx handler");
+		eth_dev->tx_pkt_burst = &enic_xmit_pkts;
+	}
+}
+
 int enic_enable(struct enic *enic)
 {
 	unsigned int index;
@@ -594,6 +609,9 @@ int enic_enable(struct enic *enic)
 		dev_warning(enic, "Init of hash table for clsf failed."\
 			"Flow director feature will not work\n");
 
+	if (enic_fm_init(enic))
+		dev_warning(enic, "Init of flowman failed.\n");
+
 	for (index = 0; index < enic->rq_count; index++) {
 		err = enic_alloc_rx_queue_mbufs(enic,
 			&enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);
@@ -629,12 +647,13 @@ int enic_enable(struct enic *enic)
 		eth_dev->tx_pkt_burst = &enic_simple_xmit_pkts;
 		for (index = 0; index < enic->wq_count; index++)
 			enic_prep_wq_for_simple_tx(enic, index);
+		enic->use_simple_tx_handler = 1;
 	} else {
 		ENICPMD_LOG(DEBUG, " use the default tx handler");
 		eth_dev->tx_pkt_burst = &enic_xmit_pkts;
 	}
 
-	pick_rx_handler(enic);
+	enic_pick_rx_handler(eth_dev);
 
 	for (index = 0; index < enic->wq_count; index++)
 		enic_start_wq(enic, index);
@@ -724,31 +743,31 @@ void enic_free_rq(void *rxq)
 
 void enic_start_wq(struct enic *enic, uint16_t queue_idx)
 {
-	struct rte_eth_dev *eth_dev = enic->rte_dev;
+	struct rte_eth_dev_data *data = enic->dev_data;
 	vnic_wq_enable(&enic->wq[queue_idx]);
-	eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
+	data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
 }
 
 int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
 {
-	struct rte_eth_dev *eth_dev = enic->rte_dev;
+	struct rte_eth_dev_data *data = enic->dev_data;
 	int ret;
 
 	ret = vnic_wq_disable(&enic->wq[queue_idx]);
 	if (ret)
 		return ret;
 
-	eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+	data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
 	return 0;
 }
 
 void enic_start_rq(struct enic *enic, uint16_t queue_idx)
 {
+	struct rte_eth_dev_data *data = enic->dev_data;
 	struct vnic_rq *rq_sop;
 	struct vnic_rq *rq_data;
 	rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
 	rq_data = &enic->rq[rq_sop->data_queue_idx];
-	struct rte_eth_dev *eth_dev = enic->rte_dev;
 
 	if (rq_data->in_use) {
 		vnic_rq_enable(rq_data);
@@ -757,13 +776,13 @@ void enic_start_rq(struct enic *enic, uint16_t queue_idx)
 	rte_mb();
 	vnic_rq_enable(rq_sop);
 	enic_initial_post_rx(enic, rq_sop);
-	eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
+	data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
 }
 
 int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
 {
+	struct rte_eth_dev_data *data = enic->dev_data;
 	int ret1 = 0, ret2 = 0;
-	struct rte_eth_dev *eth_dev = enic->rte_dev;
 	struct vnic_rq *rq_sop;
 	struct vnic_rq *rq_data;
 	rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
@@ -779,7 +798,7 @@ int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
 	else if (ret1)
 		return ret1;
 
-	eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+	data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
 	return 0;
 }
 
@@ -1050,6 +1069,7 @@ int enic_disable(struct enic *enic)
 	vnic_dev_disable(enic->vdev);
 
 	enic_clsf_destroy(enic);
+	enic_fm_destroy(enic);
 	if (!enic_is_sriov_vf(enic))
 		vnic_dev_del_addr(enic->vdev, enic->mac_addr);
 
@@ -1605,7 +1625,7 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
 
 	/* put back the real receive function */
 	rte_mb();
-	pick_rx_handler(enic);
+	enic_pick_rx_handler(eth_dev);
 	rte_mb();
 
 	/* restart Rx traffic */
@@ -1690,6 +1710,19 @@ static int enic_dev_init(struct enic *enic)
 	/* set up link status checking */
 	vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
 
+	/*
+	 * When Geneve with options offload is available, always disable it
+	 * first as it can interfere with user flow rules.
+	 */
+	if (enic->geneve_opt_avail) {
+		/*
+		 * Disabling fails if the feature is provisioned but
+		 * not enabled. So ignore result and do not log error.
+		 */
+		vnic_dev_overlay_offload_ctrl(enic->vdev,
+			OVERLAY_FEATURE_GENEVE,
+			OVERLAY_OFFLOAD_DISABLE);
+	}
 	enic->overlay_offload = false;
 	if (enic->disable_overlay && enic->vxlan) {
 		/*
@@ -1721,20 +1754,32 @@ static int enic_dev_init(struct enic *enic)
 		enic->overlay_offload = true;
 		dev_info(enic, "Overlay offload is enabled\n");
 	}
+	/* Geneve with options offload requires overlay offload */
+	if (enic->overlay_offload && enic->geneve_opt_avail &&
+	    enic->geneve_opt_request) {
+		if (vnic_dev_overlay_offload_ctrl(enic->vdev,
+				OVERLAY_FEATURE_GENEVE,
+				OVERLAY_OFFLOAD_ENABLE)) {
+			dev_err(enic, "failed to enable geneve+option\n");
+		} else {
+			enic->geneve_opt_enabled = 1;
+			dev_info(enic, "Geneve with options is enabled\n");
+		}
+	}
 	/*
 	 * Reset the vxlan port if HW vxlan parsing is available. It
 	 * is always enabled regardless of overlay offload
 	 * enable/disable.
 	 */
 	if (enic->vxlan) {
-		enic->vxlan_port = ENIC_DEFAULT_VXLAN_PORT;
+		enic->vxlan_port = RTE_VXLAN_DEFAULT_PORT;
 		/*
 		 * Reset the vxlan port to the default, as the NIC firmware
 		 * does not reset it automatically and keeps the old setting.
		 */
 		if (vnic_dev_overlay_offload_cfg(enic->vdev,
 			OVERLAY_CFG_VXLAN_PORT_UPDATE,
-			ENIC_DEFAULT_VXLAN_PORT)) {
+			RTE_VXLAN_DEFAULT_PORT)) {
 			dev_err(enic, "failed to update vxlan port\n");
 			return -EINVAL;
 		}
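
The common thread in the hunks above is multi-process support. In DPDK, the rte_eth_dev structure and its burst-function pointers (rx_pkt_burst, tx_pkt_burst) are per-process, while rte_eth_dev_data and the driver-private struct enic live in shared memory. Queue state is therefore tracked through enic->dev_data instead of a cached enic->rte_dev pointer, and the exported handler pickers take the per-process eth_dev and reach shared state via pmd_priv(). The following is a minimal sketch, not code from this patch, of how a secondary process could use the pickers during attach; enic_secondary_init() is a hypothetical name, and the actual probe path lives in enic_ethdev.c, outside this diff:

#include <errno.h>
#include <rte_eal.h>
#include <rte_ethdev_driver.h>
#include "enic.h"

/*
 * Hypothetical helper, for illustration only. A secondary process
 * attaches to dev_private (struct enic), already initialized by the
 * primary in shared memory, but must select its own per-process
 * burst-function pointers.
 */
static int
enic_secondary_init(struct rte_eth_dev *eth_dev)
{
	if (rte_eal_process_type() != RTE_PROC_SECONDARY)
		return -EINVAL;

	/*
	 * pmd_priv(eth_dev) resolves the shared enic state. The
	 * enic->use_simple_tx_handler flag was recorded there by
	 * enic_enable() in the primary, so both processes end up
	 * with matching Rx/Tx burst functions.
	 */
	enic_pick_rx_handler(eth_dev);
	enic_pick_tx_handler(eth_dev);
	return 0;
}

The same reasoning explains why enic_start_wq(), enic_stop_wq(), enic_start_rq(), and enic_stop_rq() now write queue state through enic->dev_data: a cached rte_eth_dev pointer is valid only in the process that stored it, whereas dev_data is visible to every process.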