/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2018 Broadcom
+ * Copyright(c) 2014-2021 Broadcom
* All rights reserved.
*/
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>
+#include <rte_alarm.h>
+#include <rte_cycles.h>
#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
+#include "bnxt_rxr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
PMD_DRV_LOG(DEBUG, "Parse inner header\n");
break;
case RTE_FLOW_ITEM_TYPE_ETH:
- if (!item->spec || !item->mask)
+ if (!item->spec)
break;
eth_spec = item->spec;
- eth_mask = item->mask;
+
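+ /* A NULL mask selects the rte_flow default mask for the item. */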
+ if (item->mask)
+ eth_mask = item->mask;
+ else
+ eth_mask = &rte_flow_item_eth_mask;
/* Source MAC address mask cannot be partially set.
* Should be All 0's or all 1's.
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
vlan_spec = item->spec;
- vlan_mask = item->mask;
+
+ if (item->mask)
+ vlan_mask = item->mask;
+ else
+ vlan_mask = &rte_flow_item_vlan_mask;
+
if (en & en_ethertype) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
case RTE_FLOW_ITEM_TYPE_IPV4:
/* If masking is not involved, we could use EM (exact match) filters. */
ipv4_spec = item->spec;
- ipv4_mask = item->mask;
- if (!item->spec || !item->mask)
+ if (!item->spec)
break;
+ if (item->mask)
+ ipv4_mask = item->mask;
+ else
+ ipv4_mask = &rte_flow_item_ipv4_mask;
+
/* Only IP DST and SRC fields are maskable. */
if (ipv4_mask->hdr.version_ihl ||
ipv4_mask->hdr.type_of_service ||
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
ipv6_spec = item->spec;
- ipv6_mask = item->mask;
- if (!item->spec || !item->mask)
+ if (!item->spec)
break;
+ if (item->mask)
+ ipv6_mask = item->mask;
+ else
+ ipv6_mask = &rte_flow_item_ipv6_mask;
+
/* Only IP DST and SRC fields are maskable. */
if (ipv6_mask->hdr.vtc_flow ||
ipv6_mask->hdr.payload_len ||
break;
case RTE_FLOW_ITEM_TYPE_TCP:
tcp_spec = item->spec;
- tcp_mask = item->mask;
- if (!item->spec || !item->mask)
+ if (!item->spec)
break;
+ if (item->mask)
+ tcp_mask = item->mask;
+ else
+ tcp_mask = &rte_flow_item_tcp_mask;
+
/* Check TCP mask. Only DST & SRC ports are maskable */
if (tcp_mask->hdr.sent_seq ||
tcp_mask->hdr.recv_ack ||
break;
case RTE_FLOW_ITEM_TYPE_UDP:
udp_spec = item->spec;
- udp_mask = item->mask;
- if (!item->spec || !item->mask)
+ if (!item->spec)
break;
+ if (item->mask)
+ udp_mask = item->mask;
+ else
+ udp_mask = &rte_flow_item_udp_mask;
+
if (udp_mask->hdr.dgram_len ||
udp_mask->hdr.dgram_cksum) {
rte_flow_error_set(error,
}
/* Check if VNI is masked. */
- if (vxlan_spec && vxlan_mask) {
+ if (vxlan_mask != NULL) {
vni_masked =
!!memcmp(vxlan_mask->vni, vni_mask,
RTE_DIM(vni_mask));
}
PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);
+ if (use_ntuple && !BNXT_RFS_NEEDS_VNIC(bp)) {
+ filter->flags =
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DEST_RFS_RING_IDX;
+ filter->dst_id = act_q->index;
+ goto skip_vnic_alloc;
+ }
+
vnic_id = attr->group;
if (!vnic_id) {
PMD_DRV_LOG(DEBUG, "Group id is 0\n");
PMD_DRV_LOG(DEBUG,
"Setting vnic ff_idx %d\n", vnic->ff_pool_idx);
filter->dst_id = vnic->fw_vnic_id;
-
+skip_vnic_alloc:
/* For ntuple filter, create the L2 filter with default VNIC.
* The user specified redirect queue will be set while creating
* the ntuple filter in hardware.
vnic_id = attr->group;
BNXT_VALID_VNIC_OR_RET(bp, vnic_id);
-
vnic = &bp->vnic_info[vnic_id];
/* Check if requested RSS config matches RSS config of VNIC
if (vnic->rx_queue_cnt > 1) {
vnic->hash_type =
bnxt_rte_to_hwrm_hash_types(rss->types);
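+ /* rss->level selects outer vs. inner header hashing; map it to
+ * the corresponding HWRM hash mode.
+ */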
+ vnic->hash_mode =
+ bnxt_rte_to_hwrm_hash_level(bp, rss->types, rss->level);
if (!rss->key_len) {
/* If hash key has not been specified,
* use random hash key.
*/
- prandom_bytes(vnic->rss_hash_key,
- HW_HASH_KEY_SIZE);
+ bnxt_prandom_bytes(vnic->rss_hash_key,
+ HW_HASH_KEY_SIZE);
} else {
if (rss->key_len > HW_HASH_KEY_SIZE)
memcpy(vnic->rss_hash_key,
bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
break;
case RTE_FLOW_ACTION_TYPE_MARK:
- if (bp->flags & BNXT_FLAG_RX_VECTOR_PKT_MODE) {
- PMD_DRV_LOG(DEBUG,
- "Disable vector processing for mark\n");
- rte_flow_error_set(error,
- ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act,
- "Disable vector processing for mark");
- rc = -rte_errno;
- goto ret;
- }
-
if (bp->mark_table == NULL) {
rte_flow_error_set(error,
ENOMEM,
goto ret;
}
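+ /* The vector Rx path cannot deliver per-packet mark data, so
+ * switch this port to the scalar receive handler instead of
+ * rejecting the flow.
+ */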
+ if (bp->flags & BNXT_FLAG_RX_VECTOR_PKT_MODE) {
+ PMD_DRV_LOG(DEBUG,
+ "Disabling vector processing for mark\n");
+ bp->eth_dev->rx_pkt_burst = bnxt_recv_pkts;
+ bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
+ }
+
filter->valid_flags |= BNXT_FLOW_MARK_FLAG;
filter->mark = ((const struct rte_flow_action_mark *)
act->conf)->id;
return 0;
}
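+
+/* Arm the flow-counter alarm once per port. Requires FW support for
+ * advanced flow counters and flow xstats to be enabled.
+ */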
+static void
+bnxt_setup_flow_counter(struct bnxt *bp)
+{
+ if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS &&
+ !(bp->flags & BNXT_FLAG_FC_THREAD) && BNXT_FLOW_XSTATS_EN(bp)) {
+ rte_eal_alarm_set(US_PER_S * BNXT_FC_TIMER,
+ bnxt_flow_cnt_alarm_cb,
+ (void *)bp);
+ bp->flags |= BNXT_FLAG_FC_THREAD;
+ }
+}
+
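+/* Alarm callback: read flow stats from the FW, then re-arm the alarm
+ * every BNXT_FC_TIMER seconds while flows exist and the port is up.
+ */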
+void bnxt_flow_cnt_alarm_cb(void *arg)
+{
+ int rc = 0;
+ struct bnxt *bp = arg;
+
+ if (!bp->flow_stat->rx_fc_out_tbl.va) {
+ PMD_DRV_LOG(ERR, "bp->flow_stat->rx_fc_out_tbl.va is NULL\n");
+ bnxt_cancel_fc_thread(bp);
+ return;
+ }
+
+ if (!bp->flow_stat->flow_count) {
+ bnxt_cancel_fc_thread(bp);
+ return;
+ }
+
+ if (!bp->eth_dev->data->dev_started) {
+ bnxt_cancel_fc_thread(bp);
+ return;
+ }
+
+ rc = bnxt_flow_stats_req(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Flow stat alarm not rescheduled.\n");
+ return;
+ }
+
+ rte_eal_alarm_set(US_PER_S * BNXT_FC_TIMER,
+ bnxt_flow_cnt_alarm_cb,
+ (void *)bp);
+}
+
static struct rte_flow *
bnxt_flow_create(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
bool update_flow = false;
struct rte_flow *flow;
int ret = 0;
- uint32_t tun_type;
+ uint32_t tun_type, flow_id;
if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
rte_flow_error_set(error, EINVAL,
filter->enables |=
HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
+ if (ret != 0) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create EM filter");
+ goto free_filter;
+ }
}
if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
filter->enables |=
HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
+ if (ret != 0) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create ntuple filter");
+ goto free_filter;
+ }
}
- vnic = find_matching_vnic(bp, filter);
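+ /* A filter steered directly to an Rx ring needs no dedicated
+ * VNIC; attach it to the default one.
+ */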
+ if (BNXT_RFS_NEEDS_VNIC(bp))
+ vnic = find_matching_vnic(bp, filter);
+ else
+ vnic = BNXT_GET_DEFAULT_VNIC(bp);
done:
if (!ret || update_flow) {
flow->filter = filter;
goto free_flow;
}
- STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
- PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
- STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) {
PMD_DRV_LOG(DEBUG,
"Mark action: mark id 0x%x, flow id 0x%x\n",
/* TCAM and EM should be 16-bit only.
* Other modes not supported.
*/
- bp->mark_table[filter->flow_id & BNXT_FLOW_ID_MASK] =
- filter->mark;
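+ /* Refuse to overwrite a mark table entry that is still in use. */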
+ flow_id = filter->flow_id & BNXT_FLOW_ID_MASK;
+ if (bp->mark_table[flow_id].valid) {
+ rte_flow_error_set(error, EEXIST,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "Flow with mark id exists");
+ bnxt_clear_one_vnic_filter(bp, filter);
+ goto free_filter;
+ }
+ bp->mark_table[flow_id].valid = true;
+ bp->mark_table[flow_id].mark_id = filter->mark;
}
+
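+ /* Link the filter and flow into the VNIC lists only after all
+ * hardware programming has succeeded.
+ */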
+ STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
+ STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
+
+ if (BNXT_FLOW_XSTATS_EN(bp))
+ bp->flow_stat->flow_count++;
bnxt_release_flow_lock(bp);
+ bnxt_setup_flow_counter(bp);
+ PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
return flow;
}
struct bnxt_filter_info *filter;
struct bnxt_vnic_info *vnic;
int ret = 0;
+ uint32_t flow_id;
filter = flow->filter;
vnic = flow->vnic;
PMD_DRV_LOG(ERR, "Could not find matching flow\n");
if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) {
- bp->mark_table[filter->flow_id & BNXT_FLOW_ID_MASK] = 0;
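+ /* Release the mark table slot so the flow id can be reused. */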
+ flow_id = filter->flow_id & BNXT_FLOW_ID_MASK;
+ memset(&bp->mark_table[flow_id], 0,
+ sizeof(bp->mark_table[flow_id]));
filter->flow_id = 0;
}
- if (filter->filter_type == HWRM_CFA_EM_FILTER)
- ret = bnxt_hwrm_clear_em_filter(bp, filter);
- if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
- ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
- ret = bnxt_hwrm_clear_l2_filter(bp, filter);
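+ /* Tear down the EM/ntuple filter and its backing L2 filter in
+ * one helper call.
+ */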
+ ret = bnxt_clear_one_vnic_filter(bp, filter);
done:
if (!ret) {
bnxt_free_filter(bp, filter);
STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
rte_free(flow);
+ if (BNXT_FLOW_XSTATS_EN(bp))
+ bp->flow_stat->flow_count--;
/* If this was the last flow associated with this vnic,
* switch the queue back to RSS pool.
return ret;
}
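+
+/* Stop flow-counter polling: clear the thread flag and cancel any
+ * pending alarm.
+ */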
+void bnxt_cancel_fc_thread(struct bnxt *bp)
+{
+ bp->flags &= ~BNXT_FLAG_FC_THREAD;
+ rte_eal_alarm_cancel(bnxt_flow_cnt_alarm_cb, (void *)bp);
+}
+
static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
break;
}
}
+
+ bnxt_cancel_fc_thread(bp);
bnxt_release_flow_lock(bp);
return ret;