X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fenic%2Fenic_main.c;h=7942b0df6bd50045b5e8e2dbda4856d9f9611858;hb=c52ff36686a408fd5196452380e359120c2d1ed5;hp=30c7b1c8641ffce77ab1d9c035ae6b2b137edc70;hpb=e92a4b4148d3caadb5d3a8351fdf0fa3a2a709c2;p=dpdk.git

diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 30c7b1c864..7942b0df6b 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -79,7 +79,7 @@ static void enic_free_wq_buf(struct rte_mbuf **buf)
 static void enic_log_q_error(struct enic *enic)
 {
 	unsigned int i;
-	u32 error_status;
+	uint32_t error_status;
 
 	for (i = 0; i < enic->wq_count; i++) {
 		error_status = vnic_wq_error_status(&enic->wq[i]);
@@ -222,13 +222,12 @@ void enic_init_vnic_resources(struct enic *enic)
 			error_interrupt_enable,
 			error_interrupt_offset);
 
-		data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(index)];
+		data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(index, enic)];
 		if (data_rq->in_use)
 			vnic_rq_init(data_rq,
 				     cq_idx,
 				     error_interrupt_enable,
 				     error_interrupt_offset);
-
 		vnic_cq_init(&enic->cq[cq_idx],
 			0 /* flow_control_enable */,
 			1 /* color_enable */,
@@ -264,7 +263,7 @@ void enic_init_vnic_resources(struct enic *enic)
 			0 /* cq_entry_enable */,
 			1 /* cq_message_enable */,
 			0 /* interrupt offset */,
-			(u64)enic->wq[index].cqmsg_rz->iova);
+			(uint64_t)enic->wq[index].cqmsg_rz->iova);
 	}
 
 	for (index = 0; index < enic->intr_count; index++) {
@@ -357,9 +356,9 @@ enic_initial_post_rx(struct enic *enic, struct vnic_rq *rq)
 	rq->need_initial_post = false;
 }
 
-static void *
+void *
 enic_alloc_consistent(void *priv, size_t size,
-	dma_addr_t *dma_handle, u8 *name)
+	dma_addr_t *dma_handle, uint8_t *name)
 {
 	void *vaddr;
 	const struct rte_memzone *rz;
@@ -368,7 +367,7 @@ enic_alloc_consistent(void *priv, size_t size,
 	struct enic_memzone_entry *mze;
 
 	rz = rte_memzone_reserve_aligned((const char *)name, size,
-			SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG, ENIC_ALIGN);
+			SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG, ENIC_PAGE_SIZE);
 	if (!rz) {
 		pr_err("%s : Failed to allocate memory requested for %s\n",
 			__func__, name);
@@ -397,7 +396,7 @@ enic_alloc_consistent(void *priv, size_t size,
 	return vaddr;
 }
 
-static void
+void
 enic_free_consistent(void *priv,
 		     __rte_unused size_t size,
 		     void *vaddr,
@@ -424,9 +423,9 @@ enic_free_consistent(void *priv,
 	rte_free(mze);
 }
 
-int enic_link_update(struct enic *enic)
+int enic_link_update(struct rte_eth_dev *eth_dev)
 {
-	struct rte_eth_dev *eth_dev = enic->rte_dev;
+	struct enic *enic = pmd_priv(eth_dev);
 	struct rte_eth_link link;
 
 	memset(&link, 0, sizeof(link));
@@ -445,9 +444,11 @@ enic_intr_handler(void *arg)
 
 	vnic_intr_return_all_credits(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);
 
-	enic_link_update(enic);
+	enic_link_update(dev);
 	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 	enic_log_q_error(enic);
+	/* Re-enable irq in case of INTx */
+	rte_intr_ack(&enic->pdev->intr_handle);
 }
 
 static int enic_rxq_intr_init(struct enic *enic)
@@ -607,6 +608,9 @@ int enic_enable(struct enic *enic)
 		dev_warning(enic, "Init of hash table for clsf failed."\
 			"Flow director feature will not work\n");
 
+	if (enic_fm_init(enic))
+		dev_warning(enic, "Init of flowman failed.\n");
+
 	for (index = 0; index < enic->rq_count; index++) {
 		err = enic_alloc_rx_queue_mbufs(enic,
 			&enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);
@@ -615,7 +619,7 @@ int enic_enable(struct enic *enic)
 			return err;
 		}
 		err = enic_alloc_rx_queue_mbufs(enic,
-			&enic->rq[enic_rte_rq_idx_to_data_idx(index)]);
+			&enic->rq[enic_rte_rq_idx_to_data_idx(index, enic)]);
 		if (err) {
 			/* release the allocated mbufs for the sop rq*/
 			enic_rxmbuf_queue_release(enic,
@@ -738,31 +742,31 @@ void enic_free_rq(void *rxq)
 
 void enic_start_wq(struct enic *enic, uint16_t queue_idx)
 {
-	struct rte_eth_dev *eth_dev = enic->rte_dev;
+	struct rte_eth_dev_data *data = enic->dev_data;
 	vnic_wq_enable(&enic->wq[queue_idx]);
-	eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
+	data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
 }
 
 int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
 {
-	struct rte_eth_dev *eth_dev = enic->rte_dev;
+	struct rte_eth_dev_data *data = enic->dev_data;
 	int ret;
 
 	ret = vnic_wq_disable(&enic->wq[queue_idx]);
 	if (ret)
 		return ret;
 
-	eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+	data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
 	return 0;
 }
 
 void enic_start_rq(struct enic *enic, uint16_t queue_idx)
 {
+	struct rte_eth_dev_data *data = enic->dev_data;
 	struct vnic_rq *rq_sop;
 	struct vnic_rq *rq_data;
 	rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
 	rq_data = &enic->rq[rq_sop->data_queue_idx];
-	struct rte_eth_dev *eth_dev = enic->rte_dev;
 
 	if (rq_data->in_use) {
 		vnic_rq_enable(rq_data);
@@ -771,13 +775,13 @@ void enic_start_rq(struct enic *enic, uint16_t queue_idx)
 	rte_mb();
 	vnic_rq_enable(rq_sop);
 	enic_initial_post_rx(enic, rq_sop);
-	eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
+	data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
 }
 
 int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
 {
+	struct rte_eth_dev_data *data = enic->dev_data;
 	int ret1 = 0, ret2 = 0;
-	struct rte_eth_dev *eth_dev = enic->rte_dev;
 	struct vnic_rq *rq_sop;
 	struct vnic_rq *rq_data;
 	rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
@@ -793,7 +797,7 @@ int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
 	else if (ret1)
 		return ret1;
 
-	eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+	data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
 	return 0;
 }
 
@@ -803,7 +807,7 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
 {
 	int rc;
 	uint16_t sop_queue_idx = enic_rte_rq_idx_to_sop_idx(queue_idx);
-	uint16_t data_queue_idx = enic_rte_rq_idx_to_data_idx(queue_idx);
+	uint16_t data_queue_idx = enic_rte_rq_idx_to_data_idx(queue_idx, enic);
 	struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx];
 	struct vnic_rq *rq_data = &enic->rq[data_queue_idx];
 	unsigned int mbuf_size, mbufs_per_pkt;
@@ -1005,7 +1009,7 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
 	int err;
 	struct vnic_wq *wq = &enic->wq[queue_idx];
 	unsigned int cq_index = enic_cq_wq(enic, queue_idx);
-	char name[NAME_MAX];
+	char name[RTE_MEMZONE_NAMESIZE];
 	static int instance;
 
 	wq->socket_id = socket_id;
@@ -1039,7 +1043,7 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
 
 	wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
 			sizeof(uint32_t), SOCKET_ID_ANY,
-			RTE_MEMZONE_IOVA_CONTIG, ENIC_ALIGN);
+			RTE_MEMZONE_IOVA_CONTIG, ENIC_PAGE_SIZE);
 	if (!wq->cqmsg_rz)
 		return -ENOMEM;
 
@@ -1064,6 +1068,7 @@ int enic_disable(struct enic *enic)
 	vnic_dev_disable(enic->vdev);
 
 	enic_clsf_destroy(enic);
+	enic_fm_destroy(enic);
 	if (!enic_is_sriov_vf(enic))
 		vnic_dev_del_addr(enic->vdev, enic->mac_addr);
 
@@ -1148,10 +1153,10 @@ static int enic_set_rsskey(struct enic *enic, uint8_t *user_key)
 	dma_addr_t rss_key_buf_pa;
 	union vnic_rss_key *rss_key_buf_va = NULL;
 	int err, i;
-	u8 name[NAME_MAX];
+	uint8_t name[RTE_MEMZONE_NAMESIZE];
 
 	RTE_ASSERT(user_key != NULL);
-	snprintf((char *)name, NAME_MAX, "rss_key-%s", enic->bdf_name);
+	snprintf((char *)name, sizeof(name), "rss_key-%s", enic->bdf_name);
 	rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key),
 		&rss_key_buf_pa, name);
 	if (!rss_key_buf_va)
@@ -1180,9 +1185,9 @@ int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu)
 	dma_addr_t rss_cpu_buf_pa;
 	union vnic_rss_cpu *rss_cpu_buf_va = NULL;
 	int err;
-	u8 name[NAME_MAX];
+	uint8_t name[RTE_MEMZONE_NAMESIZE];
 
-	snprintf((char *)name, NAME_MAX, "rss_cpu-%s", enic->bdf_name);
+	snprintf((char *)name, sizeof(name), "rss_cpu-%s", enic->bdf_name);
 	rss_cpu_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_cpu),
 		&rss_cpu_buf_pa, name);
 	if (!rss_cpu_buf_va)
@@ -1203,10 +1208,11 @@ int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu)
 	return err;
 }
 
-static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
-	u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
+static int enic_set_niccfg(struct enic *enic, uint8_t rss_default_cpu,
+	uint8_t rss_hash_type, uint8_t rss_hash_bits, uint8_t rss_base_cpu,
+	uint8_t rss_enable)
 {
-	const u8 tso_ipid_split_en = 0;
+	const uint8_t tso_ipid_split_en = 0;
 	int err;
 
 	err = enic_set_nic_cfg(enic,
@@ -1302,8 +1308,8 @@ int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
 {
 	struct rte_eth_dev *eth_dev;
 	uint64_t rss_hf;
-	u8 rss_hash_type;
-	u8 rss_enable;
+	uint8_t rss_hash_type;
+	uint8_t rss_enable;
 	int ret;
 
 	RTE_ASSERT(rss_conf != NULL);
@@ -1468,7 +1474,7 @@ enic_reinit_rq(struct enic *enic, unsigned int rq_idx)
 	int rc = 0;
 
 	sop_rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
-	data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx)];
+	data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx, enic)];
 	cq_idx = rq_idx;
 
 	vnic_cq_clean(&enic->cq[cq_idx]);
@@ -1491,8 +1497,8 @@ enic_reinit_rq(struct enic *enic, unsigned int rq_idx)
 	if (data_rq->in_use) {
 		vnic_rq_init_start(data_rq,
 				   enic_cq_rq(enic,
-				   enic_rte_rq_idx_to_data_idx(rq_idx)), 0,
-				   data_rq->ring.desc_count - 1, 1, 0);
+				   enic_rte_rq_idx_to_data_idx(rq_idx, enic)),
+				   0, data_rq->ring.desc_count - 1, 1, 0);
 	}
 
 	rc = enic_alloc_rx_queue_mbufs(enic, sop_rq);
@@ -1704,6 +1710,19 @@ static int enic_dev_init(struct enic *enic)
 	/* set up link status checking */
 	vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
 
+	/*
+	 * When Geneve with options offload is available, always disable it
+	 * first as it can interfere with user flow rules.
+	 */
+	if (enic->geneve_opt_avail) {
+		/*
+		 * Disabling fails if the feature is provisioned but
+		 * not enabled. So ignore result and do not log error.
+		 */
+		vnic_dev_overlay_offload_ctrl(enic->vdev,
+			OVERLAY_FEATURE_GENEVE,
+			OVERLAY_OFFLOAD_DISABLE);
+	}
 	enic->overlay_offload = false;
 	if (enic->disable_overlay && enic->vxlan) {
 		/*
@@ -1735,20 +1754,32 @@ static int enic_dev_init(struct enic *enic)
 		enic->overlay_offload = true;
 		dev_info(enic, "Overlay offload is enabled\n");
 	}
+	/* Geneve with options offload requires overlay offload */
+	if (enic->overlay_offload && enic->geneve_opt_avail &&
+	    enic->geneve_opt_request) {
+		if (vnic_dev_overlay_offload_ctrl(enic->vdev,
+				OVERLAY_FEATURE_GENEVE,
+				OVERLAY_OFFLOAD_ENABLE)) {
+			dev_err(enic, "failed to enable geneve+option\n");
+		} else {
+			enic->geneve_opt_enabled = 1;
+			dev_info(enic, "Geneve with options is enabled\n");
+		}
+	}
 	/*
 	 * Reset the vxlan port if HW vxlan parsing is available. It
 	 * is always enabled regardless of overlay offload
 	 * enable/disable.
 	 */
 	if (enic->vxlan) {
-		enic->vxlan_port = ENIC_DEFAULT_VXLAN_PORT;
+		enic->vxlan_port = RTE_VXLAN_DEFAULT_PORT;
		/*
 		 * Reset the vxlan port to the default, as the NIC firmware
 		 * does not reset it automatically and keeps the old setting.
 		 */
 		if (vnic_dev_overlay_offload_cfg(enic->vdev,
 				OVERLAY_CFG_VXLAN_PORT_UPDATE,
-				ENIC_DEFAULT_VXLAN_PORT)) {
+				RTE_VXLAN_DEFAULT_PORT)) {
 			dev_err(enic, "failed to update vxlan port\n");
 			return -EINVAL;
 		}
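
Notes (reviewer annotations, not part of the patch):

The change that recurs throughout this diff is the extra struct enic *
argument to enic_rte_rq_idx_to_data_idx(). The helper itself lives in
enic.h and is not shown here. The sketch below is illustrative only: it
assumes the historical enic layout in which each RTE RX queue is backed by
a start-of-packet (sop) RQ plus a data RQ used for scattered RX, so the old
helper could derive the data index from the queue index alone. Passing the
adapter lets the mapping depend on per-device state; the "new" formula
below is a placeholder, not the enic.h implementation.

	/* Illustrative sketch only -- the real helpers are in enic.h. */
	static inline unsigned int
	enic_rte_rq_idx_to_sop_idx(unsigned int rte_rq_idx)
	{
		return rte_rq_idx * 2;	/* sop RQs at even slots */
	}

	/* Old form: the data RQ was the odd slot next to its sop RQ. */
	static inline unsigned int
	data_idx_old(unsigned int rte_rq_idx)
	{
		return rte_rq_idx * 2 + 1;
	}

	/* New form (hypothetical body): the adapter pointer allows a
	 * placement that depends on configured queue counts. */
	static inline unsigned int
	data_idx_new(unsigned int rte_rq_idx, struct enic *enic)
	{
		return enic->rq_count + rte_rq_idx;	/* placeholder */
	}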
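The interrupt-handler hunk changes enic_link_update() to take the
rte_eth_dev and adds rte_intr_ack() so level-triggered INTx interrupts are
re-armed after servicing. Applications observe this path through the ethdev
LSC callback that _rte_eth_dev_callback_process() invokes. A minimal
consumer sketch, assuming the port was configured with
dev_conf.intr_conf.lsc = 1; the registration calls are the standard ethdev
API, while the callback body is illustrative:

	#include <stdio.h>
	#include <rte_ethdev.h>

	static int
	lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
		     void *cb_arg, void *ret_param)
	{
		struct rte_eth_link link;

		(void)event;
		(void)cb_arg;
		(void)ret_param;
		/* Non-blocking read; the PMD has already refreshed it. */
		rte_eth_link_get_nowait(port_id, &link);
		printf("port %u link %s, speed %u Mbps\n", port_id,
		       link.link_status ? "up" : "down", link.link_speed);
		return 0;
	}

	/* Register once, e.g. right after rte_eth_dev_configure(). */
	static void
	register_lsc_cb(uint16_t port_id)
	{
		rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
					      lsc_event_cb, NULL);
	}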
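The rss_key/rss_cpu hunks size the memzone-name buffer with
RTE_MEMZONE_NAMESIZE instead of NAME_MAX and pass sizeof(name) to
snprintf(). rte_memzone_reserve() rejects names that do not fit in the
memzone name field, so truncating at the memzone limit (rather than at the
much larger NAME_MAX) is what keeps a long bdf_name usable. A minimal
sketch of the pattern; build_rss_mz_name() is a hypothetical helper:

	#include <stdio.h>
	#include <stdint.h>
	#include <rte_memzone.h>

	static void
	build_rss_mz_name(const char *bdf_name)
	{
		/* Sized to what the memzone API accepts; snprintf() with
		 * sizeof(name) truncates safely instead of producing an
		 * over-long name that rte_memzone_reserve() would reject. */
		uint8_t name[RTE_MEMZONE_NAMESIZE];

		snprintf((char *)name, sizeof(name), "rss_key-%s", bdf_name);
	}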