Add support for RTE_FLOW_ACTION_TYPE_MARK.
Use the flow_id returned by the FW during flow creation to look up the
mark id provided by the application.
Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Reviewed-by: Lance Richardson <lance.richardson@broadcom.com>
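In outline (a sketch of the scheme this patch implements; the
surrounding driver plumbing is elided): the FW-assigned flow_id keys a
per-port table holding the application's mark id, and the cfa_code in
the Rx completion keys the same table on receive.

    /* Flow create: remember the mark id, keyed by the low 16 bits of
     * the FW-assigned flow_id (TCAM/EM flow ids are 16-bit).
     */
    bp->mark_table[filter->flow_id & BNXT_FLOW_ID_MASK] = filter->mark;

    /* Rx completion: cfa_code carries the flow_id, so recovering the
     * mark is one lookup before it lands in mbuf->hash.fdir.id.
     */
    mbuf->hash.fdir.id = bp->mark_table[cfa_code & BNXT_FLOW_ID_MASK];
    mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;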
#define BNXT_FLAG_INIT_DONE BIT(21)
#define BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS BIT(22)
#define BNXT_FLAG_ADV_FLOW_MGMT BIT(23)
+#define BNXT_FLAG_RX_VECTOR_PKT_MODE BIT(24)
#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
#define BNXT_NPAR(bp) ((bp)->port_partition_type)
/* Struct to hold adapter error recovery related info */
struct bnxt_error_recovery_info *recovery_info;
+#define BNXT_MARK_TABLE_SZ (sizeof(uint32_t) * 64 * 1024)
+/* TCAM and EM should be 16-bit only. Other modes not supported. */
+#define BNXT_FLOW_ID_MASK 0x0000ffff
+ uint32_t *mark_table;
};
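Sizing note: the table holds one 32-bit mark per possible 16-bit flow
id, i.e. 64K entries (256 KB) per port. A compile-time check of that
relationship (a sketch using C11 _Static_assert; DPDK code would more
likely use RTE_BUILD_BUG_ON):

    /* One entry per value that can survive BNXT_FLOW_ID_MASK:
     * 64K ids * 4 bytes = 256 KB.
     */
    _Static_assert(BNXT_MARK_TABLE_SZ ==
                   (BNXT_FLOW_ID_MASK + 1) * sizeof(uint32_t),
                   "mark table must cover every maskable flow id");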
int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu);
}
bnxt_print_link_info(bp->eth_dev);
+ /* Non-fatal on failure: MARK flows are rejected at create time if
+  * the table is missing.
+  */
+ bp->mark_table = rte_zmalloc("bnxt_mark_table", BNXT_MARK_TABLE_SZ, 0);
+ if (!bp->mark_table)
+ PMD_DRV_LOG(ERR, "Allocation of mark table failed\n");
+
return 0;
err_free:
}
static eth_rx_burst_t
-bnxt_receive_function(__rte_unused struct rte_eth_dev *eth_dev)
+bnxt_receive_function(struct rte_eth_dev *eth_dev)
{
+ struct bnxt *bp = eth_dev->data->dev_private;
+
#ifdef RTE_ARCH_X86
#ifndef RTE_LIBRTE_IEEE1588
/*
DEV_RX_OFFLOAD_VLAN_FILTER))) {
PMD_DRV_LOG(INFO, "Using vector mode receive for port %d\n",
eth_dev->data->port_id);
+ bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE;
return bnxt_recv_pkts_vec;
}
PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n",
eth_dev->data->port_id);
#endif
#endif
+ bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
return bnxt_recv_pkts;
}
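The vector Rx path does not parse the completion's cfa_code/metadata,
so mark delivery needs the scalar path; hence the flag set here and the
ENOTSUP for MARK flows while it is set (see the flow-create check
below). An application that needs MARK can force the scalar path by
requesting an Rx offload the vector mode rejects. A hypothetical
snippet (port_id, nb_rxq and nb_txq are assumed; the exact offload set
that disables vector Rx is driver-version specific):

    struct rte_eth_conf conf = { 0 };

    /* VLAN filtering is among the offloads the vector path rejects
     * (see the check above), so requesting it selects bnxt_recv_pkts.
     */
    conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
    if (rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf) != 0)
            rte_exit(EXIT_FAILURE, "cannot configure port %u\n", port_id);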
bnxt_int_handler(eth_dev);
bnxt_shutdown_nic(bp);
bnxt_hwrm_if_change(bp, 0);
+ if (bp->mark_table != NULL)
+ memset(bp->mark_table, 0, BNXT_MARK_TABLE_SZ);
+ bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
bp->dev_stopped = 1;
bp->rx_cosq_cnt = 0;
}
bp->grp_info = NULL;
}
+ rte_free(bp->mark_table);
+ bp->mark_table = NULL;
+
bnxt_dev_uninit(eth_dev);
}
bp = eth_dev->data->dev_private;
bp->dev_stopped = 1;
+ bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
if (bnxt_vf_pciid(pci_dev->id.device_id))
bp->flags |= BNXT_FLAG_VF;
#define BNXT_FLOW_L2_INNER_DST_VALID_FLAG BIT(4)
#define BNXT_FLOW_L2_DROP_FLAG BIT(5)
#define BNXT_FLOW_PARSE_INNER_FLAG BIT(6)
+#define BNXT_FLOW_MARK_FLAG BIT(7)
struct bnxt_filter_info {
STAILQ_ENTRY(bnxt_filter_info) next;
+ uint32_t flow_id;
uint64_t fw_l2_filter_id;
struct bnxt_filter_info *matching_l2_fltr_ptr;
uint64_t fw_em_filter_id;
* to remember which vnic it was created on
*/
struct bnxt_vnic_info *vnic;
+ uint32_t mark;
};
struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp);
}
filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
filter->l2_ref_cnt = filter1->l2_ref_cnt;
+ filter->flow_id = filter1->flow_id;
PMD_DRV_LOG(DEBUG,
"l2_filter: %p fw_l2_filter_id %" PRIx64 " l2_ref_cnt %u\n",
filter1, filter->fw_l2_filter_id, filter->l2_ref_cnt);
filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;
use_ntuple = bnxt_filter_type_check(pattern, error);
+
+start:
switch (act->type) {
case RTE_FLOW_ACTION_TYPE_QUEUE:
/* Allow this flow. Redirect to a VNIC. */
}
filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
+ filter->flow_id = filter1->flow_id;
filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
break;
case RTE_FLOW_ACTION_TYPE_VF:
}
filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
+ filter->flow_id = filter1->flow_id;
break;
case RTE_FLOW_ACTION_TYPE_RSS:
rss = (const struct rte_flow_action_rss *)act->conf;
PMD_DRV_LOG(DEBUG, "L2 filter created\n");
bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ if (bp->flags & BNXT_FLAG_RX_VECTOR_PKT_MODE) {
+ PMD_DRV_LOG(DEBUG,
+ "MARK action is not supported with vector mode Rx\n");
+ rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "MARK not supported with vector mode Rx");
+ rc = -rte_errno;
+ goto ret;
+ }
+
+ if (bp->mark_table == NULL) {
+ rte_flow_error_set(error,
+ ENOMEM,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "Mark table not allocated.");
+ rc = -rte_errno;
+ goto ret;
+ }
+
+ filter->valid_flags |= BNXT_FLOW_MARK_FLAG;
+ filter->mark = ((const struct rte_flow_action_mark *)
+ act->conf)->id;
+ PMD_DRV_LOG(DEBUG, "Mark the flow %d\n", filter->mark);
+ break;
default:
rte_flow_error_set(error,
EINVAL,
done:
act = bnxt_flow_non_void_action(++act);
- if (act->type != RTE_FLOW_ACTION_TYPE_END) {
- rte_flow_error_set(error,
- EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act,
- "Invalid action.");
- rc = -rte_errno;
- goto ret;
- }
+ /* Loop back to parse any remaining actions (e.g. QUEUE then MARK). */
+ if (act->type != RTE_FLOW_ACTION_TYPE_END)
+ goto start;
return rc;
ret:
- //TODO: Cleanup according to ACTION TYPE.
+ if (filter1) {
+ bnxt_hwrm_clear_l2_filter(bp, filter1);
+ bnxt_free_filter(bp, filter1);
+ }
+
if (rte_errno) {
if (vnic && STAILQ_EMPTY(&vnic->filter))
vnic->rx_queue_cnt = 0;
STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
+ if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) {
+ PMD_DRV_LOG(DEBUG,
+ "Mark action: mark id 0x%x, flow id 0x%x\n",
+ filter->mark, filter->flow_id);
+
+ /* TCAM and EM should be 16-bit only.
+ * Other modes not supported.
+ */
+ bp->mark_table[filter->flow_id & BNXT_FLOW_ID_MASK] =
+ filter->mark;
+ }
bnxt_release_flow_lock(bp);
return flow;
}
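For completeness, a minimal application-side flow that exercises this
path (hypothetical sketch; attr and pattern are assumed to be set up
elsewhere, and the action loop above handles QUEUE followed by MARK):

    struct rte_flow_action_queue queue = { .index = 0 };
    struct rte_flow_action_mark mark = { .id = 0x1234 };
    struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
            { .type = RTE_FLOW_ACTION_TYPE_END },
    };
    struct rte_flow_error err;
    struct rte_flow *flow;

    flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
    if (flow == NULL)
            printf("flow create failed: %s\n",
                   err.message ? err.message : "unspecified");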
if (ret == 0)
PMD_DRV_LOG(ERR, "Could not find matching flow\n");
+ if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) {
+ bp->mark_table[filter->flow_id & BNXT_FLOW_ID_MASK] = 0;
+ filter->flow_id = 0;
+ }
+
if (filter->filter_type == HWRM_CFA_EM_FILTER)
ret = bnxt_hwrm_clear_em_filter(bp, filter);
if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
HWRM_CHECK_RESULT();
filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
+ filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
HWRM_UNLOCK();
return rc;
HWRM_CHECK_RESULT();
filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
+ filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
HWRM_UNLOCK();
return rc;
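Both the EM and ntuple allocation paths capture flow_id from the HWRM
response, so TCAM and EM flows land in the same table. Illustrative
helpers (not part of the patch) showing the set/clear symmetry around
that id:

    static inline void
    bnxt_mark_set(struct bnxt *bp, uint32_t flow_id, uint32_t mark)
    {
            bp->mark_table[flow_id & BNXT_FLOW_ID_MASK] = mark;
    }

    static inline void
    bnxt_mark_clear(struct bnxt *bp, uint32_t flow_id)
    {
            bp->mark_table[flow_id & BNXT_FLOW_ID_MASK] = 0;
    }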
mbuf->hash.rss = rxcmp->rss_hash;
mbuf->ol_flags |= PKT_RX_RSS_HASH;
} else {
- mbuf->hash.fdir.id = rxcmp1->cfa_code;
+ mbuf->hash.fdir.id = bnxt_get_cfa_code_or_mark_id(rxq->bp,
+ rxcmp1);
mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
}
#ifdef RTE_LIBRTE_IEEE1588
return 0;
}
+
+uint32_t bnxt_get_cfa_code_or_mark_id(struct bnxt *bp,
+ struct rx_pkt_cmpl_hi *rxcmp1)
+{
+ uint32_t cfa_code = 0;
+ uint8_t meta_fmt = 0;
+ uint16_t flags2 = 0;
+ uint32_t meta = 0;
+
+ cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code);
+ if (!cfa_code)
+ return 0;
+
+ if (cfa_code && !bp->mark_table[cfa_code])
+ return cfa_code;
+
+ flags2 = rte_le_to_cpu_16(rxcmp1->flags2);
+ meta = rte_le_to_cpu_32(rxcmp1->metadata);
+ if (meta) {
+ meta >>= BNXT_RX_META_CFA_CODE_SHIFT;
+
+ /*
+ * The flags field holds extra bits of info from [6:4]
+ * which indicate if the flow is in TCAM or EM or EEM
+ */
+ meta_fmt = (flags2 & BNXT_CFA_META_FMT_MASK) >>
+ BNXT_CFA_META_FMT_SHFT;
+
+ /*
+ * meta_fmt == 4 => 'b100 => 'b10x => EM.
+ * meta_fmt == 5 => 'b101 => 'b10x => EM + VLAN
+ * meta_fmt == 6 => 'b110 => 'b11x => EEM
+ * meta_fmt == 7 => 'b111 => 'b11x => EEM + VLAN.
+ */
+ meta_fmt >>= BNXT_CFA_META_FMT_EM_EEM_SHFT;
+ /* meta_fmt is decoded here but not yet consumed; the lookup
+  * below handles the 16-bit TCAM/EM case only.
+  */
+ }
+ return bp->mark_table[cfa_code];
+}
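On the receive side, the application reads the mark back in the
standard rte_flow way. A hypothetical snippet (mbufs, BURST_SZ and
handle_mark are assumptions):

    uint16_t i, nb;

    nb = rte_eth_rx_burst(port_id, 0, mbufs, BURST_SZ);
    for (i = 0; i < nb; i++) {
            /* PKT_RX_FDIR_ID means hash.fdir.id holds the mark id. */
            if (mbufs[i]->ol_flags & PKT_RX_FDIR_ID)
                    handle_mark(mbufs[i]->hash.fdir.id);
    }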
int bnxt_rxq_vec_setup(struct bnxt_rx_queue *rxq);
#endif
+uint32_t bnxt_get_cfa_code_or_mark_id(struct bnxt *bp,
+ struct rx_pkt_cmpl_hi *rxcmp1);
+#define BNXT_RX_META_CFA_CODE_SHIFT 19
+#define BNXT_CFA_CODE_META_SHIFT 16
+#define BNXT_RX_META_CFA_CODE_INT_ACT_REC_BIT 0x8000000
+#define BNXT_RX_META_CFA_CODE_EEM_BIT 0x4000000
+#define BNXT_CFA_META_FMT_MASK 0x70
+#define BNXT_CFA_META_FMT_SHFT 4
+#define BNXT_CFA_META_FMT_EM_EEM_SHFT 1
+#define BNXT_CFA_META_FMT_EEM 3
+
#endif
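For reference, an illustrative decode of the meta format bits described
in the comments above (flags2 is assumed to be the CPU-order flags2
field of the completion record):

    /* flags2 bits [6:4] encode where the flow lives; after the EM/EEM
     * shift, 'b10x collapses to 2 (EM) and 'b11x to 3 (EEM).
     */
    uint8_t meta_fmt = (flags2 & BNXT_CFA_META_FMT_MASK) >>
                       BNXT_CFA_META_FMT_SHFT;    /* 4..7 when valid */

    meta_fmt >>= BNXT_CFA_META_FMT_EM_EEM_SHFT;   /* 2 (EM) or 3 (EEM) */
    if (meta_fmt == BNXT_CFA_META_FMT_EEM)
            printf("EEM flow: ids wider than 16 bits, not handled\n");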