#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>
+#include <rte_memzone.h>
#include "enic_compat.h"
#include "enic.h"
#include "vnic_nic.h"
#define IP_DEFTTL 64 /* from RFC 1340. */
-#define IP_VERSION 0x40
-#define IP_HDRLEN 0x05 /* default IP header length == five 32-bits words. */
-#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
#define IP6_VTC_FLOW 0x60000000
/* Highest Item type supported by Flowman */
RTE_FLOW_ACTION_TYPE_FLAG,
RTE_FLOW_ACTION_TYPE_JUMP,
RTE_FLOW_ACTION_TYPE_MARK,
+ RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
RTE_FLOW_ACTION_TYPE_PORT_ID,
RTE_FLOW_ACTION_TYPE_PASSTHRU,
RTE_FLOW_ACTION_TYPE_QUEUE,
RTE_FLOW_ACTION_TYPE_COUNT,
RTE_FLOW_ACTION_TYPE_DROP,
RTE_FLOW_ACTION_TYPE_JUMP,
+ RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
+ RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
+ RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
RTE_FLOW_ACTION_TYPE_PASSTHRU,
RTE_FLOW_ACTION_TYPE_VOID,
RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
struct fm_exact_match_table *cmd;
struct fm_header_set *hdr;
struct enic_fm_fet *fet;
- u64 args[3];
+ uint64_t args[3];
int ret;
ENICPMD_FUNC_TRACE();
return 0;
}
-/* Steer operations need to appear before other ops */
+/* NIC requires that 1st steer appear before decap.
+ * Correct example: steer, decap, steer, steer, ...
+ */
static void
enic_fm_reorder_action_op(struct enic_flowman *fm)
{
- struct fm_action_op *dst, *dst_head, *src, *src_head;
+ struct fm_action_op *op, *steer, *decap;
+ struct fm_action_op tmp_op;
ENICPMD_FUNC_TRACE();
- /* Move steer ops to the front. */
- src = fm->action.fma_action_ops;
- src_head = src;
- dst = fm->action_tmp.fma_action_ops;
- dst_head = dst;
- /* Copy steer ops to tmp */
- while (src->fa_op != FMOP_END) {
- if (src->fa_op == FMOP_RQ_STEER) {
- ENICPMD_LOG(DEBUG, "move op: %ld -> dst %ld",
- (long)(src - src_head),
- (long)(dst - dst_head));
- *dst = *src;
- dst++;
- }
- src++;
- }
- /* Then append non-steer ops */
- src = src_head;
- while (src->fa_op != FMOP_END) {
- if (src->fa_op != FMOP_RQ_STEER) {
- ENICPMD_LOG(DEBUG, "move op: %ld -> dst %ld",
- (long)(src - src_head),
- (long)(dst - dst_head));
- *dst = *src;
- dst++;
- }
- src++;
+ /* Find 1st steer and decap */
+ op = fm->action.fma_action_ops;
+ steer = NULL;
+ decap = NULL;
+ while (op->fa_op != FMOP_END) {
+ if (!decap && op->fa_op == FMOP_DECAP_NOSTRIP)
+ decap = op;
+ else if (!steer && op->fa_op == FMOP_RQ_STEER)
+ steer = op;
+ op++;
+ }
+ /* If decap is before steer, swap */
+ if (steer && decap && decap < steer) {
+ op = fm->action.fma_action_ops;
+ ENICPMD_LOG(DEBUG, "swap decap %ld <-> steer %ld",
+ (long)(decap - op), (long)(steer - op));
+ tmp_op = *decap;
+ *decap = *steer;
+ *steer = tmp_op;
}
- /* Copy END */
- *dst = *src;
- /* Finally replace the original action with the reordered one */
- memcpy(fm->action.fma_action_ops, fm->action_tmp.fma_action_ops,
- sizeof(fm->action.fma_action_ops));
}
/* VXLAN decap is done via flowman compound action */
sizeof(struct rte_vxlan_hdr);
append_template(&template, &off, item->spec,
sizeof(struct rte_ipv4_hdr));
- ip4->version_ihl = IP_VHL_DEF;
+ ip4->version_ihl = RTE_IPV4_VHL_DEF;
if (ip4->time_to_live == 0)
ip4->time_to_live = IP_DEFTTL;
ip4->next_proto_id = IPPROTO_UDP;
uint64_t *handle)
{
uint32_t bdf;
- u64 args[2];
+ uint64_t args[2];
int rc;
ENICPMD_FUNC_TRACE();
{
enum {
FATE = 1 << 0,
- MARK = 1 << 1,
+ DECAP = 1 << 1,
PASSTHRU = 1 << 2,
COUNT = 1 << 3,
ENCAP = 1 << 4,
+ PUSH_VLAN = 1 << 5,
};
struct fm_tcam_match_entry *fmt;
struct fm_action_op fm_op;
+ bool need_ovlan_action;
struct enic *enic;
uint32_t overlap;
uint64_t vnic_h;
+ uint16_t ovlan;
bool first_rq;
int ret;
ENICPMD_FUNC_TRACE();
fmt = &fm->tcam_entry;
+ need_ovlan_action = false;
+ ovlan = 0;
first_rq = true;
enic = fm->enic;
overlap = 0;
const struct rte_flow_action_mark *mark =
actions->conf;
- if (overlap & MARK)
- goto unsupported;
- overlap |= MARK;
if (mark->id >= ENIC_MAGIC_FILTER_ID - 1)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
break;
}
case RTE_FLOW_ACTION_TYPE_FLAG: {
- if (overlap & MARK)
- goto unsupported;
- overlap |= MARK;
/* ENIC_MAGIC_FILTER_ID is reserved for flagging */
memset(&fm_op, 0, sizeof(fm_op));
fm_op.fa_op = FMOP_MARK;
actions->conf;
/*
- * If other fate kind is set, fail. Multiple
- * queue actions are ok.
+ * If fate other than QUEUE or RSS, fail. Multiple
+ * rss and queue actions are ok.
*/
if ((overlap & FATE) && first_rq)
goto unsupported;
fm_op.fa_op = FMOP_RQ_STEER;
fm_op.rq_steer.rq_index =
enic_rte_rq_idx_to_sop_idx(queue->index);
+ fm_op.rq_steer.rq_count = 1;
fm_op.rq_steer.vnic_handle = vnic_h;
ret = enic_fm_append_action_op(fm, &fm_op, error);
if (ret)
uint16_t i;
/*
- * Hardware does not support general RSS actions, but
- * we can still support the dummy one that is used to
- * "receive normally".
+ * If fate other than QUEUE or RSS, fail. Multiple
+ * rss and queue actions are ok.
+ */
+ if ((overlap & FATE) && first_rq)
+ goto unsupported;
+ first_rq = false;
+ overlap |= FATE;
+
+ /*
+ * Hardware only supports RSS actions on outer level
+ * with default type and function. Queues must be
+ * sequential.
*/
allow = rss->func == RTE_ETH_HASH_FUNCTION_DEFAULT &&
- rss->level == 0 &&
- (rss->types == 0 ||
- rss->types == enic->rss_hf) &&
- rss->queue_num == enic->rq_count &&
- rss->key_len == 0;
- /* Identity queue map is ok */
- for (i = 0; i < rss->queue_num; i++)
- allow = allow && (i == rss->queue[i]);
+ rss->level == 0 && (rss->types == 0 ||
+ rss->types == enic->rss_hf) &&
+ rss->queue_num <= enic->rq_count &&
+ rss->queue[rss->queue_num - 1] < enic->rq_count;
+
+
+			/* Queue numbers must be sequential */
+ for (i = 1; i < rss->queue_num; i++)
+ allow = allow && (rss->queue[i] ==
+ rss->queue[i - 1] + 1);
if (!allow)
goto unsupported;
- if (overlap & FATE)
- goto unsupported;
- /* Need MARK or FLAG */
- if (!(overlap & MARK))
- goto unsupported;
- overlap |= FATE;
+
+ memset(&fm_op, 0, sizeof(fm_op));
+ fm_op.fa_op = FMOP_RQ_STEER;
+ fm_op.rq_steer.rq_index =
+ enic_rte_rq_idx_to_sop_idx(rss->queue[0]);
+ fm_op.rq_steer.rq_count = rss->queue_num;
+ fm_op.rq_steer.vnic_handle = vnic_h;
+ ret = enic_fm_append_action_op(fm, &fm_op, error);
+ if (ret)
+ return ret;
+ ENICPMD_LOG(DEBUG, "create QUEUE action rq: %u",
+ fm_op.rq_steer.rq_index);
break;
}
case RTE_FLOW_ACTION_TYPE_PORT_ID: {
break;
}
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: {
+ if (overlap & DECAP)
+ goto unsupported;
+ overlap |= DECAP;
+
ret = enic_fm_copy_vxlan_decap(fm, fmt, actions,
error);
if (ret != 0)
return ret;
break;
}
+ case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: {
+ memset(&fm_op, 0, sizeof(fm_op));
+ fm_op.fa_op = FMOP_POP_VLAN;
+ ret = enic_fm_append_action_op(fm, &fm_op, error);
+ if (ret)
+ return ret;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: {
+ const struct rte_flow_action_of_push_vlan *vlan;
+
+ if (overlap & PASSTHRU)
+ goto unsupported;
+ vlan = actions->conf;
+ if (vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN)) {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "unexpected push_vlan ethertype");
+ }
+ overlap |= PUSH_VLAN;
+ need_ovlan_action = true;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: {
+ const struct rte_flow_action_of_set_vlan_pcp *pcp;
+
+ pcp = actions->conf;
+ if (pcp->vlan_pcp > 7) {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "invalid vlan_pcp");
+ }
+ need_ovlan_action = true;
+ ovlan |= ((uint16_t)pcp->vlan_pcp) << 13;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: {
+ const struct rte_flow_action_of_set_vlan_vid *vid;
+
+ vid = actions->conf;
+ need_ovlan_action = true;
+ ovlan |= rte_be_to_cpu_16(vid->vlan_vid);
+ break;
+ }
default:
goto unsupported;
}
if (!(overlap & (FATE | PASSTHRU | COUNT)))
goto unsupported;
+ if (need_ovlan_action) {
+ memset(&fm_op, 0, sizeof(fm_op));
+ fm_op.fa_op = FMOP_SET_OVLAN;
+ fm_op.ovlan.vlan = ovlan;
+ ret = enic_fm_append_action_op(fm, &fm_op, error);
+ if (ret)
+ return ret;
+ }
memset(&fm_op, 0, sizeof(fm_op));
fm_op.fa_op = FMOP_END;
ret = enic_fm_append_action_op(fm, &fm_op, error);
[FMOP_ENCAP] = "encap",
[FMOP_SET_OVLAN] = "set_ovlan",
[FMOP_DECAP_NOSTRIP] = "decap_nostrip",
+ [FMOP_DECAP_STRIP] = "decap_strip",
+ [FMOP_POP_VLAN] = "pop_vlan",
+ [FMOP_SET_EGPORT] = "set_egport",
+ [FMOP_RQ_STEER_ONLY] = "rq_steer_only",
+ [FMOP_SET_ENCAP_VLAN] = "set_encap_vlan",
+ [FMOP_EMIT] = "emit",
+ [FMOP_MODIFY] = "modify",
};
const struct fm_action_op *op = &fm_action->fma_action_ops[0];
char buf[128], *bp = buf;
const struct fm_action *fm_action,
uint8_t ingress)
{
- if (rte_log_get_level(enic_pmd_logtype) < (int)RTE_LOG_DEBUG)
+ if (!rte_log_can_log(enic_pmd_logtype, RTE_LOG_DEBUG))
return;
enic_fm_dump_tcam_match(fm_match, ingress);
enic_fm_dump_tcam_actions(fm_action);
struct enic_fm_counter *ctrs;
struct enic *enic;
int i, rc;
- u64 args[2];
+ uint64_t args[2];
ENICPMD_FUNC_TRACE();
enic = fm->enic;
enic_fm_counter_zero(struct enic_flowman *fm, struct enic_fm_counter *c)
{
struct enic *enic;
- u64 args[3];
+ uint64_t args[3];
int ret;
ENICPMD_FUNC_TRACE();
static int
enic_fm_action_free(struct enic_flowman *fm, uint64_t handle)
{
- u64 args[2];
+ uint64_t args[2];
int rc;
ENICPMD_FUNC_TRACE();
static int
enic_fm_entry_free(struct enic_flowman *fm, uint64_t handle)
{
- u64 args[2];
+ uint64_t args[2];
int rc;
ENICPMD_FUNC_TRACE();
struct rte_flow_error *error)
{
struct fm_tcam_match_entry *ftm;
- u64 args[3];
+ uint64_t args[3];
int ret;
ENICPMD_FUNC_TRACE();
struct rte_flow_error *error)
{
struct fm_exact_match_entry *fem;
- u64 args[3];
+ uint64_t args[3];
int ret;
ENICPMD_FUNC_TRACE();
struct fm_action *fma;
uint64_t action_h;
uint64_t entry_h;
- u64 args[3];
+ uint64_t args[3];
int ret;
ENICPMD_FUNC_TRACE();
struct rte_flow_query_count *query;
struct enic_fm_flow *fm_flow;
struct enic *enic;
- u64 args[3];
+ uint64_t args[3];
int rc;
ENICPMD_FUNC_TRACE();
static int
enic_fm_tbl_free(struct enic_flowman *fm, uint64_t handle)
{
- u64 args[2];
+ uint64_t args[2];
int rc;
args[0] = FM_MATCH_TABLE_FREE;
{
struct fm_tcam_match_table *tcam_tbl;
struct enic *enic;
- u64 args[2];
+ uint64_t args[2];
int rc;
ENICPMD_FUNC_TRACE();
enic_fm_free_all_counters(struct enic_flowman *fm)
{
struct enic *enic;
- u64 args[2];
+ uint64_t args[2];
int rc;
enic = fm->enic;
enic_fm_init(struct enic *enic)
{
struct enic_flowman *fm;
- u8 name[NAME_MAX];
+ uint8_t name[RTE_MEMZONE_NAMESIZE];
int rc;
if (enic->flow_filter_mode != FILTER_FLOWMAN)
TAILQ_INIT(&fm->fet_list);
TAILQ_INIT(&fm->jump_list);
/* Allocate host memory for flowman commands */
- snprintf((char *)name, NAME_MAX, "fm-cmd-%s", enic->bdf_name);
+ snprintf((char *)name, sizeof(name), "fm-cmd-%s", enic->bdf_name);
fm->cmd.va = enic_alloc_consistent(enic,
sizeof(union enic_flowman_cmd_mem), &fm->cmd.pa, name);
if (!fm->cmd.va) {