X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fbnxt%2Fbnxt_flow.c;h=59489b591a6f60224044fc02513447016153961a;hb=c6c90a33de906eb40a8eb01e16736cbaa2845b97;hp=cad232aba7da7576df565bf9abf407dc30092435;hpb=26f044cebd554555d678f0cb9c26b1c32b14b393;p=dpdk.git

diff --git a/drivers/net/bnxt/bnxt_flow.c b/drivers/net/bnxt/bnxt_flow.c
index cad232aba7..59489b591a 100644
--- a/drivers/net/bnxt/bnxt_flow.c
+++ b/drivers/net/bnxt/bnxt_flow.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2018 Broadcom
+ * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */
@@ -10,12 +10,15 @@
 #include <rte_flow.h>
 #include <rte_flow_driver.h>
 #include <rte_tailq.h>
+#include <rte_alarm.h>
+#include <rte_cycles.h>
 #include "bnxt.h"
 #include "bnxt_filter.h"
 #include "bnxt_hwrm.h"
 #include "bnxt_ring.h"
 #include "bnxt_rxq.h"
+#include "bnxt_rxr.h"
 #include "bnxt_vnic.h"
 #include "hsi_struct_def_dpdk.h"
@@ -77,7 +80,7 @@ bnxt_flow_non_void_action(const struct rte_flow_action *cur)
 static int
 bnxt_filter_type_check(const struct rte_flow_item pattern[],
- struct rte_flow_error *error __rte_unused)
+ struct rte_flow_error *error)
 {
 const struct rte_flow_item *item =
 bnxt_flow_non_void_item(pattern);
@@ -177,14 +180,6 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 return -rte_errno;
 }
- if (!item->spec || !item->mask) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "spec/mask is NULL");
- return -rte_errno;
- }
-
 switch (item->type) {
 case RTE_FLOW_ITEM_TYPE_ANY:
 inner =
@@ -193,11 +188,15 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 PMD_DRV_LOG(DEBUG, "Parse inner header\n");
 break;
 case RTE_FLOW_ITEM_TYPE_ETH:
- if (!item->spec || !item->mask)
+ if (!item->spec)
 break;
 eth_spec = item->spec;
- eth_mask = item->mask;
+
+ if (item->mask)
+ eth_mask = item->mask;
+ else
+ eth_mask = &rte_flow_item_eth_mask;
 /* Source MAC address mask cannot be partially set.
 * Should be All 0's or all 1's.
@@ -286,7 +285,12 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 break;
 case RTE_FLOW_ITEM_TYPE_VLAN:
 vlan_spec = item->spec;
- vlan_mask = item->mask;
+
+ if (item->mask)
+ vlan_mask = item->mask;
+ else
+ vlan_mask = &rte_flow_item_vlan_mask;
+
 if (en & en_ethertype) {
 rte_flow_error_set(error, EINVAL,
 RTE_FLOW_ERROR_TYPE_ITEM,
@@ -329,11 +333,15 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 case RTE_FLOW_ITEM_TYPE_IPV4:
 /* If mask is not involved, we could use EM filters. */
 ipv4_spec = item->spec;
- ipv4_mask = item->mask;
- if (!item->spec || !item->mask)
+ if (!item->spec)
 break;
+ if (item->mask)
+ ipv4_mask = item->mask;
+ else
+ ipv4_mask = &rte_flow_item_ipv4_mask;
+
 /* Only IP DST and SRC fields are maskable. */
 if (ipv4_mask->hdr.version_ihl ||
 ipv4_mask->hdr.type_of_service ||
@@ -390,11 +398,15 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 break;
 case RTE_FLOW_ITEM_TYPE_IPV6:
 ipv6_spec = item->spec;
- ipv6_mask = item->mask;
- if (!item->spec || !item->mask)
+ if (!item->spec)
 break;
+ if (item->mask)
+ ipv6_mask = item->mask;
+ else
+ ipv6_mask = &rte_flow_item_ipv6_mask;
+
 /* Only IP DST and SRC fields are maskable. */
 if (ipv6_mask->hdr.vtc_flow ||
 ipv6_mask->hdr.payload_len ||
@@ -442,11 +454,15 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 break;
 case RTE_FLOW_ITEM_TYPE_TCP:
 tcp_spec = item->spec;
- tcp_mask = item->mask;
- if (!item->spec || !item->mask)
+ if (!item->spec)
 break;
+ if (item->mask)
+ tcp_mask = item->mask;
+ else
+ tcp_mask = &rte_flow_item_tcp_mask;
+
 /* Check TCP mask.
Only DST & SRC ports are maskable */ if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack || @@ -487,11 +503,15 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp, break; case RTE_FLOW_ITEM_TYPE_UDP: udp_spec = item->spec; - udp_mask = item->mask; - if (!item->spec || !item->mask) + if (!item->spec) break; + if (item->mask) + udp_mask = item->mask; + else + udp_mask = &rte_flow_item_udp_mask; + if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) { rte_flow_error_set(error, @@ -559,7 +579,7 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp, } /* Check if VNI is masked. */ - if (vxlan_spec && vxlan_mask) { + if (vxlan_mask != NULL) { vni_masked = !!memcmp(vxlan_mask->vni, vni_mask, RTE_DIM(vni_mask)); @@ -754,10 +774,9 @@ bnxt_find_matching_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf) { struct bnxt_filter_info *mf, *f0; struct bnxt_vnic_info *vnic0; - struct rte_flow *flow; int i; - vnic0 = &bp->vnic_info[0]; + vnic0 = BNXT_GET_DEFAULT_VNIC(bp); f0 = STAILQ_FIRST(&vnic0->filter); /* This flow has same DST MAC as the port/l2 filter. */ @@ -770,8 +789,7 @@ bnxt_find_matching_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf) if (vnic->fw_vnic_id == INVALID_VNIC_ID) continue; - STAILQ_FOREACH(flow, &vnic->flow_list, next) { - mf = flow->filter; + STAILQ_FOREACH(mf, &vnic->filter, next) { if (mf->matching_l2_fltr_ptr) continue; @@ -806,6 +824,8 @@ bnxt_create_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf, if (filter1 == NULL) return NULL; + memcpy(filter1, nf, sizeof(*filter1)); + filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE; filter1->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX; if (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG || @@ -875,7 +895,6 @@ bnxt_create_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf, bnxt_free_filter(bp, filter1); return NULL; } - filter1->l2_ref_cnt++; return filter1; } @@ -888,42 +907,71 @@ bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf, l2_filter = bnxt_find_matching_l2_filter(bp, nf); if (l2_filter) { l2_filter->l2_ref_cnt++; - nf->matching_l2_fltr_ptr = l2_filter; } else { l2_filter = bnxt_create_l2_filter(bp, nf, vnic); - nf->matching_l2_fltr_ptr = NULL; + if (l2_filter) { + STAILQ_INSERT_TAIL(&vnic->filter, l2_filter, next); + l2_filter->vnic = vnic; + } } + nf->matching_l2_fltr_ptr = l2_filter; return l2_filter; } -static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic) +static void bnxt_vnic_cleanup(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + if (vnic->rx_queue_cnt > 1) + bnxt_hwrm_vnic_ctx_free(bp, vnic); + + bnxt_hwrm_vnic_free(bp, vnic); + + rte_free(vnic->fw_grp_ids); + vnic->fw_grp_ids = NULL; + + vnic->rx_queue_cnt = 0; +} + +static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic, + const struct rte_flow_action *act, + struct rte_flow_error *error) { struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; uint64_t rx_offloads = dev_conf->rxmode.offloads; int rc; + if (bp->nr_vnics > bp->max_vnics - 1) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + NULL, + "Group id is invalid"); + rc = bnxt_vnic_grp_alloc(bp, vnic); if (rc) - goto ret; + return rte_flow_error_set(error, -rc, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Failed to alloc VNIC group"); rc = bnxt_hwrm_vnic_alloc(bp, vnic); if (rc) { - PMD_DRV_LOG(ERR, "HWRM vnic alloc failure rc: %x\n", rc); + rte_flow_error_set(error, -rc, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Failed to alloc VNIC"); goto ret; } - bp->nr_vnics++; /* RSS context 
is required only when there is more than one RSS ring */ if (vnic->rx_queue_cnt > 1) { - rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0 /* ctx_idx 0 */); + rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0); if (rc) { - PMD_DRV_LOG(ERR, - "HWRM vnic ctx alloc failure: %x\n", rc); + rte_flow_error_set(error, -rc, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Failed to alloc VNIC context"); goto ret; } - } else { - PMD_DRV_LOG(DEBUG, "No RSS context required\n"); } if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) @@ -932,12 +980,29 @@ static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic) vnic->vlan_strip = false; rc = bnxt_hwrm_vnic_cfg(bp, vnic); - if (rc) + if (rc) { + rte_flow_error_set(error, -rc, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Failed to configure VNIC"); goto ret; + } - bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); + rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); + if (rc) { + rte_flow_error_set(error, -rc, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Failed to configure VNIC plcmode"); + goto ret; + } + + bp->nr_vnics++; + + return 0; ret: + bnxt_vnic_cleanup(bp, vnic); return rc; } @@ -999,6 +1064,7 @@ bnxt_update_filter_flags_en(struct bnxt_filter_info *filter, } filter->fw_l2_filter_id = filter1->fw_l2_filter_id; filter->l2_ref_cnt = filter1->l2_ref_cnt; + filter->flow_id = filter1->flow_id; PMD_DRV_LOG(DEBUG, "l2_filter: %p fw_l2_filter_id %" PRIx64 " l2_ref_cnt %u\n", filter1, filter->fw_l2_filter_id, filter->l2_ref_cnt); @@ -1041,6 +1107,8 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev, filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX; use_ntuple = bnxt_filter_type_check(pattern, error); + +start: switch (act->type) { case RTE_FLOW_ACTION_TYPE_QUEUE: /* Allow this flow. Redirect to a VNIC. */ @@ -1056,22 +1124,22 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev, } PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index); + if (use_ntuple && !BNXT_RFS_NEEDS_VNIC(bp)) { + filter->flags = + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DEST_RFS_RING_IDX; + filter->dst_id = act_q->index; + goto skip_vnic_alloc; + } + vnic_id = attr->group; if (!vnic_id) { PMD_DRV_LOG(DEBUG, "Group id is 0\n"); vnic_id = act_q->index; } + BNXT_VALID_VNIC_OR_RET(bp, vnic_id); + vnic = &bp->vnic_info[vnic_id]; - if (vnic == NULL) { - rte_flow_error_set(error, - EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - act, - "No matching VNIC found."); - rc = -rte_errno; - goto ret; - } if (vnic->rx_queue_cnt) { if (vnic->start_grp_id != act_q->index) { PMD_DRV_LOG(ERR, @@ -1093,9 +1161,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev, vnic->fw_vnic_id != INVALID_HW_RING_ID) goto use_vnic; - if (!rxq || - bp->vnic_info[0].fw_grp_ids[act_q->index] != - INVALID_HW_RING_ID) { + if (!rxq) { PMD_DRV_LOG(ERR, "Queue invalid or used with other VNIC\n"); rte_flow_error_set(error, @@ -1116,16 +1182,9 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev, PMD_DRV_LOG(DEBUG, "VNIC found\n"); - rc = bnxt_vnic_prep(bp, vnic); - if (rc) { - rte_flow_error_set(error, - EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - act, - "VNIC prep fail"); - rc = -rte_errno; + rc = bnxt_vnic_prep(bp, vnic, act, error); + if (rc) goto ret; - } PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n", @@ -1136,7 +1195,16 @@ use_vnic: PMD_DRV_LOG(DEBUG, "Setting vnic ff_idx %d\n", vnic->ff_pool_idx); filter->dst_id = vnic->fw_vnic_id; - filter1 = bnxt_get_l2_filter(bp, filter, vnic); +skip_vnic_alloc: + /* For ntuple filter, create the L2 filter with default VNIC. 
+ * The user specified redirect queue will be set while creating + * the ntuple filter in hardware. + */ + vnic0 = BNXT_GET_DEFAULT_VNIC(bp); + if (use_ntuple) + filter1 = bnxt_get_l2_filter(bp, filter, vnic0); + else + filter1 = bnxt_get_l2_filter(bp, filter, vnic); if (filter1 == NULL) { rte_flow_error_set(error, ENOSPC, @@ -1189,6 +1257,7 @@ use_vnic: } filter->fw_l2_filter_id = filter1->fw_l2_filter_id; + filter->flow_id = filter1->flow_id; filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER; break; case RTE_FLOW_ACTION_TYPE_VF: @@ -1257,33 +1326,15 @@ use_vnic: } filter->fw_l2_filter_id = filter1->fw_l2_filter_id; + filter->flow_id = filter1->flow_id; break; case RTE_FLOW_ACTION_TYPE_RSS: rss = (const struct rte_flow_action_rss *)act->conf; vnic_id = attr->group; - if (!vnic_id) { - PMD_DRV_LOG(ERR, "Group id cannot be 0\n"); - rte_flow_error_set(error, - EINVAL, - RTE_FLOW_ERROR_TYPE_ATTR, - NULL, - "Group id cannot be 0"); - rc = -rte_errno; - goto ret; - } + BNXT_VALID_VNIC_OR_RET(bp, vnic_id); vnic = &bp->vnic_info[vnic_id]; - if (vnic == NULL) { - rte_flow_error_set(error, - EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - act, - "No matching VNIC for RSS group."); - rc = -rte_errno; - goto ret; - } - PMD_DRV_LOG(DEBUG, "VNIC found\n"); /* Check if requested RSS config matches RSS config of VNIC * only if it is not a fresh VNIC configuration. @@ -1344,16 +1395,9 @@ use_vnic: vnic->end_grp_id = rss->queue[rss->queue_num - 1]; vnic->func_default = 0; //This is not a default VNIC. - rc = bnxt_vnic_prep(bp, vnic); - if (rc) { - rte_flow_error_set(error, - EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - act, - "VNIC prep fail"); - rc = -rte_errno; + rc = bnxt_vnic_prep(bp, vnic, act, error); + if (rc) goto ret; - } PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n", @@ -1382,13 +1426,15 @@ use_vnic: if (vnic->rx_queue_cnt > 1) { vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss->types); + vnic->hash_mode = + bnxt_rte_to_hwrm_hash_level(bp, rss->types, rss->level); if (!rss->key_len) { /* If hash key has not been specified, * use random hash key. 
*/ - prandom_bytes(vnic->rss_hash_key, - HW_HASH_KEY_SIZE); + bnxt_prandom_bytes(vnic->rss_hash_key, + HW_HASH_KEY_SIZE); } else { if (rss->key_len > HW_HASH_KEY_SIZE) memcpy(vnic->rss_hash_key, @@ -1420,6 +1466,29 @@ vnic_found: PMD_DRV_LOG(DEBUG, "L2 filter created\n"); bnxt_update_filter_flags_en(filter, filter1, use_ntuple); break; + case RTE_FLOW_ACTION_TYPE_MARK: + if (bp->mark_table == NULL) { + rte_flow_error_set(error, + ENOMEM, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Mark table not allocated."); + rc = -rte_errno; + goto ret; + } + + if (bp->flags & BNXT_FLAG_RX_VECTOR_PKT_MODE) { + PMD_DRV_LOG(DEBUG, + "Disabling vector processing for mark\n"); + bp->eth_dev->rx_pkt_burst = bnxt_recv_pkts; + bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; + } + + filter->valid_flags |= BNXT_FLOW_MARK_FLAG; + filter->mark = ((const struct rte_flow_action_mark *) + act->conf)->id; + PMD_DRV_LOG(DEBUG, "Mark the flow %d\n", filter->mark); + break; default: rte_flow_error_set(error, EINVAL, @@ -1430,27 +1499,19 @@ vnic_found: goto ret; } - if (filter1 && !filter->matching_l2_fltr_ptr) { - bnxt_free_filter(bp, filter1); - filter1->fw_l2_filter_id = -1; - } - done: act = bnxt_flow_non_void_action(++act); - if (act->type != RTE_FLOW_ACTION_TYPE_END) { - rte_flow_error_set(error, - EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - act, - "Invalid action."); - rc = -rte_errno; - goto ret; - } + while (act->type != RTE_FLOW_ACTION_TYPE_END) + goto start; return rc; ret: - //TODO: Cleanup according to ACTION TYPE. + if (filter1) { + bnxt_hwrm_clear_l2_filter(bp, filter1); + bnxt_free_filter(bp, filter1); + } + if (rte_errno) { if (vnic && STAILQ_EMPTY(&vnic->filter)) vnic->rx_queue_cnt = 0; @@ -1458,7 +1519,7 @@ ret: if (rxq && !vnic->rx_queue_cnt) rxq->vnic = &bp->vnic_info[0]; } - return rc; + return -rte_errno; } static @@ -1501,9 +1562,11 @@ bnxt_flow_validate(struct rte_eth_dev *dev, filter = bnxt_get_unused_filter(bp); if (filter == NULL) { - PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n"); + rte_flow_error_set(error, ENOSPC, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Not enough resources for a new flow"); bnxt_release_flow_lock(bp); - return -ENOMEM; + return -ENOSPC; } ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr, @@ -1514,10 +1577,8 @@ bnxt_flow_validate(struct rte_eth_dev *dev, vnic = find_matching_vnic(bp, filter); if (vnic) { if (STAILQ_EMPTY(&vnic->filter)) { - rte_free(vnic->fw_grp_ids); - bnxt_hwrm_vnic_ctx_free(bp, vnic); - bnxt_hwrm_vnic_free(bp, vnic); - vnic->rx_queue_cnt = 0; + bnxt_vnic_cleanup(bp, vnic); + bp->nr_vnics--; PMD_DRV_LOG(DEBUG, "Free VNIC\n"); } } @@ -1531,7 +1592,6 @@ bnxt_flow_validate(struct rte_eth_dev *dev, exit: /* No need to hold on to this filter if we are just validating flow */ - filter->fw_l2_filter_id = UINT64_MAX; bnxt_free_filter(bp, filter); bnxt_release_flow_lock(bp); @@ -1547,10 +1607,13 @@ bnxt_update_filter(struct bnxt *bp, struct bnxt_filter_info *old_filter, * filter which points to the new destination queue and so we clear * the previous L2 filter. For ntuple filters, we are going to reuse * the old L2 filter and create new NTUPLE filter with this new - * destination queue subsequently during bnxt_flow_create. + * destination queue subsequently during bnxt_flow_create. So we + * decrement the ref cnt of the L2 filter that would've been bumped + * up previously in bnxt_validate_and_parse_flow as the old n-tuple + * filter that was referencing it will be deleted now. 
*/ + bnxt_hwrm_clear_l2_filter(bp, old_filter); if (new_filter->filter_type == HWRM_CFA_L2_FILTER) { - bnxt_hwrm_clear_l2_filter(bp, old_filter); bnxt_hwrm_set_l2_filter(bp, new_filter->dst_id, new_filter); } else { if (new_filter->filter_type == HWRM_CFA_EM_FILTER) @@ -1625,6 +1688,51 @@ bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf) return 0; } +static void +bnxt_setup_flow_counter(struct bnxt *bp) +{ + if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS && + !(bp->flags & BNXT_FLAG_FC_THREAD) && BNXT_FLOW_XSTATS_EN(bp)) { + rte_eal_alarm_set(US_PER_S * BNXT_FC_TIMER, + bnxt_flow_cnt_alarm_cb, + (void *)bp); + bp->flags |= BNXT_FLAG_FC_THREAD; + } +} + +void bnxt_flow_cnt_alarm_cb(void *arg) +{ + int rc = 0; + struct bnxt *bp = arg; + + if (!bp->flow_stat->rx_fc_out_tbl.va) { + PMD_DRV_LOG(ERR, "bp->flow_stat->rx_fc_out_tbl.va is NULL?\n"); + bnxt_cancel_fc_thread(bp); + return; + } + + if (!bp->flow_stat->flow_count) { + bnxt_cancel_fc_thread(bp); + return; + } + + if (!bp->eth_dev->data->dev_started) { + bnxt_cancel_fc_thread(bp); + return; + } + + rc = bnxt_flow_stats_req(bp); + if (rc) { + PMD_DRV_LOG(ERR, "Flow stat alarm not rescheduled.\n"); + return; + } + + rte_eal_alarm_set(US_PER_S * BNXT_FC_TIMER, + bnxt_flow_cnt_alarm_cb, + (void *)bp); +} + + static struct rte_flow * bnxt_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, @@ -1638,7 +1746,7 @@ bnxt_flow_create(struct rte_eth_dev *dev, bool update_flow = false; struct rte_flow *flow; int ret = 0; - uint32_t tun_type; + uint32_t tun_type, flow_id; if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { rte_flow_error_set(error, EINVAL, @@ -1673,7 +1781,9 @@ bnxt_flow_create(struct rte_eth_dev *dev, filter = bnxt_get_unused_filter(bp); if (filter == NULL) { - PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n"); + rte_flow_error_set(error, ENOSPC, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Not enough resources for a new flow"); goto free_flow; } @@ -1739,15 +1849,30 @@ bnxt_flow_create(struct rte_eth_dev *dev, filter->enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID; ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter); + if (ret != 0) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to create EM filter"); + goto free_filter; + } } if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) { filter->enables |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter); + if (ret != 0) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to create ntuple filter"); + goto free_filter; + } } - vnic = find_matching_vnic(bp, filter); + if (BNXT_RFS_NEEDS_VNIC(bp)) + vnic = find_matching_vnic(bp, filter); + else + vnic = BNXT_GET_DEFAULT_VNIC(bp); done: if (!ret || update_flow) { flow->filter = filter; @@ -1757,10 +1882,35 @@ done: goto free_flow; } + if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) { + PMD_DRV_LOG(DEBUG, + "Mark action: mark id 0x%x, flow id 0x%x\n", + filter->mark, filter->flow_id); + + /* TCAM and EM should be 16-bit only. + * Other modes not supported. 
+ */ + flow_id = filter->flow_id & BNXT_FLOW_ID_MASK; + if (bp->mark_table[flow_id].valid) { + rte_flow_error_set(error, EEXIST, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + "Flow with mark id exists"); + bnxt_clear_one_vnic_filter(bp, filter); + goto free_filter; + } + bp->mark_table[flow_id].valid = true; + bp->mark_table[flow_id].mark_id = filter->mark; + } + STAILQ_INSERT_TAIL(&vnic->filter, filter, next); - PMD_DRV_LOG(ERR, "Successfully created flow.\n"); STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next); + + if (BNXT_FLOW_XSTATS_EN(bp)) + bp->flow_stat->flow_count++; bnxt_release_flow_lock(bp); + bnxt_setup_flow_counter(bp); + PMD_DRV_LOG(DEBUG, "Successfully created flow.\n"); return flow; } @@ -1775,7 +1925,7 @@ free_flow: rte_flow_error_set(error, 0, RTE_FLOW_ERROR_TYPE_NONE, NULL, "Flow with pattern exists, updating destination queue"); - else + else if (!rte_errno) rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "Failed to create flow."); @@ -1816,68 +1966,58 @@ static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp, /* Tunnel doesn't belong to this VF, so don't send HWRM * cmd, just delete the flow from driver */ - if (bp->fw_fid != (tun_dst_fid + bp->first_vf_id)) + if (bp->fw_fid != (tun_dst_fid + bp->first_vf_id)) { PMD_DRV_LOG(ERR, "Tunnel does not belong to this VF, skip hwrm_tunnel_redirect_free\n"); - else + } else { ret = bnxt_hwrm_tunnel_redirect_free(bp, filter->tunnel_type); + if (ret) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + "Unable to free tunnel redirection"); + return ret; + } + } } return ret; } static int -bnxt_flow_destroy(struct rte_eth_dev *dev, - struct rte_flow *flow, - struct rte_flow_error *error) +_bnxt_flow_destroy(struct bnxt *bp, + struct rte_flow *flow, + struct rte_flow_error *error) { - struct bnxt *bp = dev->data->dev_private; struct bnxt_filter_info *filter; struct bnxt_vnic_info *vnic; int ret = 0; - - bnxt_acquire_flow_lock(bp); - if (!flow) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "Invalid flow: failed to destroy flow."); - bnxt_release_flow_lock(bp); - return -EINVAL; - } + uint32_t flow_id; filter = flow->filter; vnic = flow->vnic; - if (!filter) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "Invalid flow: failed to destroy flow."); - bnxt_release_flow_lock(bp); - return -EINVAL; - } - if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER && filter->enables == filter->tunnel_type) { - ret = bnxt_handle_tunnel_redirect_destroy(bp, - filter, - error); - if (!ret) { + ret = bnxt_handle_tunnel_redirect_destroy(bp, filter, error); + if (!ret) goto done; - } else { - bnxt_release_flow_lock(bp); + else return ret; - } } ret = bnxt_match_filter(bp, filter); if (ret == 0) PMD_DRV_LOG(ERR, "Could not find matching flow\n"); - if (filter->filter_type == HWRM_CFA_EM_FILTER) - ret = bnxt_hwrm_clear_em_filter(bp, filter); - if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) - ret = bnxt_hwrm_clear_ntuple_filter(bp, filter); - ret = bnxt_hwrm_clear_l2_filter(bp, filter); + if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) { + flow_id = filter->flow_id & BNXT_FLOW_ID_MASK; + memset(&bp->mark_table[flow_id], 0, + sizeof(bp->mark_table[flow_id])); + filter->flow_id = 0; + } + + ret = bnxt_clear_one_vnic_filter(bp, filter); done: if (!ret) { @@ -1894,18 +2034,16 @@ done: bnxt_free_filter(bp, filter); STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next); rte_free(flow); + if (BNXT_FLOW_XSTATS_EN(bp)) + bp->flow_stat->flow_count--; /* If 
this was the last flow associated with this vnic, * switch the queue back to RSS pool. */ if (vnic && !vnic->func_default && STAILQ_EMPTY(&vnic->flow_list)) { - rte_free(vnic->fw_grp_ids); - if (vnic->rx_queue_cnt > 1) - bnxt_hwrm_vnic_ctx_free(bp, vnic); - - bnxt_hwrm_vnic_free(bp, vnic); - vnic->rx_queue_cnt = 0; + bnxt_vnic_cleanup(bp, vnic); + bp->nr_vnics--; } } else { rte_flow_error_set(error, -ret, @@ -1913,15 +2051,49 @@ done: "Failed to destroy flow."); } + return ret; +} + +static int +bnxt_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct bnxt *bp = dev->data->dev_private; + int ret = 0; + + bnxt_acquire_flow_lock(bp); + if (!flow) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Invalid flow: failed to destroy flow."); + bnxt_release_flow_lock(bp); + return -EINVAL; + } + + if (!flow->filter) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Invalid flow: failed to destroy flow."); + bnxt_release_flow_lock(bp); + return -EINVAL; + } + ret = _bnxt_flow_destroy(bp, flow, error); bnxt_release_flow_lock(bp); + return ret; } +void bnxt_cancel_fc_thread(struct bnxt *bp) +{ + bp->flags &= ~BNXT_FLAG_FC_THREAD; + rte_eal_alarm_cancel(bnxt_flow_cnt_alarm_cb, (void *)bp); +} + static int bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct bnxt *bp = dev->data->dev_private; - struct bnxt_filter_info *filter = NULL; struct bnxt_vnic_info *vnic; struct rte_flow *flow; unsigned int i; @@ -1935,66 +2107,19 @@ bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error) while (!STAILQ_EMPTY(&vnic->flow_list)) { flow = STAILQ_FIRST(&vnic->flow_list); - filter = flow->filter; - - if (filter->filter_type == - HWRM_CFA_TUNNEL_REDIRECT_FILTER && - filter->enables == filter->tunnel_type) { - ret = - bnxt_handle_tunnel_redirect_destroy(bp, - filter, - error); - if (!ret) { - goto done; - } else { - bnxt_release_flow_lock(bp); - return ret; - } - } - - if (filter->filter_type == HWRM_CFA_EM_FILTER) - ret = bnxt_hwrm_clear_em_filter(bp, filter); - if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) - ret = bnxt_hwrm_clear_ntuple_filter(bp, filter); - else if (i) - ret = bnxt_hwrm_clear_l2_filter(bp, filter); - if (ret) { - rte_flow_error_set - (error, - -ret, - RTE_FLOW_ERROR_TYPE_HANDLE, - NULL, - "Failed to flush flow in HW."); - bnxt_release_flow_lock(bp); - return -rte_errno; - } -done: - STAILQ_REMOVE(&vnic->flow_list, flow, - rte_flow, next); - - STAILQ_REMOVE(&vnic->filter, - filter, - bnxt_filter_info, - next); - bnxt_free_filter(bp, filter); - - rte_free(flow); + if (!flow->filter) + continue; - /* If this was the last flow associated with this vnic, - * switch the queue back to RSS pool. - */ - if (STAILQ_EMPTY(&vnic->flow_list)) { - rte_free(vnic->fw_grp_ids); - if (vnic->rx_queue_cnt > 1) - bnxt_hwrm_vnic_ctx_free(bp, vnic); - bnxt_hwrm_vnic_free(bp, vnic); - vnic->rx_queue_cnt = 0; - } + ret = _bnxt_flow_destroy(bp, flow, error); + if (ret) + break; } } + bnxt_cancel_fc_thread(bp); bnxt_release_flow_lock(bp); + return ret; }
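
The rte_flow usage this patch enables can be sketched as follows. With the default-mask fallback added above, an application may now pass an item spec without a mask and the PMD substitutes the generic rte_flow_item_*_mask; with the new RTE_FLOW_ACTION_TYPE_MARK handling, a flow can tag matched packets with a 32-bit id. The port id, destination address, queue index, and mark value below are illustrative only, not taken from the patch:

	/* Minimal sketch: match IPv4 dst 192.0.2.1 (no explicit mask, so
	 * the PMD falls back to rte_flow_item_ipv4_mask) and attach MARK
	 * plus QUEUE actions. Hypothetical helper, not part of the patch.
	 */
	#include <rte_flow.h>
	#include <rte_ip.h>
	#include <rte_byteorder.h>

	static struct rte_flow *
	create_marked_flow(uint16_t port_id, struct rte_flow_error *error)
	{
		struct rte_flow_attr attr = { .ingress = 1 };

		/* Spec only; the driver now applies the default mask. */
		struct rte_flow_item_ipv4 ip_spec = {
			.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 0, 2, 1)),
		};
		struct rte_flow_item pattern[] = {
			{ .type = RTE_FLOW_ITEM_TYPE_ETH },
			{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip_spec },
			{ .type = RTE_FLOW_ITEM_TYPE_END },
		};

		/* Tag matched packets with id 0x1234, steer to Rx queue 1. */
		struct rte_flow_action_mark mark = { .id = 0x1234 };
		struct rte_flow_action_queue queue = { .index = 1 };
		struct rte_flow_action actions[] = {
			{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
			{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
			{ .type = RTE_FLOW_ACTION_TYPE_END },
		};

		return rte_flow_create(port_id, &attr, pattern, actions, error);
	}

Per the MARK handling added in bnxt_validate_and_parse_flow(), the bnxt PMD services such a flow by switching the port to the scalar receive path (bnxt_recv_pkts) and recording the id in bp->mark_table; following standard rte_flow semantics, matched packets then carry the id in mbuf->hash.fdir.hi with PKT_RX_FDIR_ID set in ol_flags.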