count = Y
drop = Y
flag = Y
+mark = Y
+of_pop_vlan = Y
+of_push_vlan = Y
+of_set_vlan_pcp = Y
+of_set_vlan_vid = Y
pf = Y
queue = Y
rss = Y
count = Y
drop = Y
flag = Y
+mark = Y
+of_pop_vlan = Y
+of_push_vlan = Y
+of_set_vlan_pcp = Y
+of_set_vlan_vid = Y
pf = Y
queue = Y
rss = Y
count = Y
drop = Y
flag = Y
+mark = Y
+of_pop_vlan = Y
+of_push_vlan = Y
+of_set_vlan_pcp = Y
+of_set_vlan_vid = Y
pf = Y
queue = Y
rss = Y
#include <cnxk_rte_flow.h>
#include "cn10k_rte_flow.h"
#include "cn10k_ethdev.h"
+#include "cn10k_rx.h"
struct rte_flow *
cn10k_flow_create(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ int mark_actions = 0, vtag_actions = 0;
+ struct roc_npc *npc = &dev->npc;
struct roc_npc_flow *flow;
flow = cnxk_flow_create(eth_dev, attr, pattern, actions, error);
if (!flow)
return NULL;
+ mark_actions = roc_npc_mark_actions_get(npc);
+
+ if (mark_actions) {
+ dev->rx_offload_flags |= NIX_RX_OFFLOAD_MARK_UPDATE_F;
+ cn10k_eth_set_rx_function(eth_dev);
+ }
+
+ vtag_actions = roc_npc_vtag_actions_get(npc);
+
+ if (vtag_actions) {
+ dev->rx_offload_flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
+ cn10k_eth_set_rx_function(eth_dev);
+ }
+
return (struct rte_flow *)flow;
}
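For context, a minimal rte_flow sketch that exercises this path: an ingress rule whose OF_POP_VLAN action makes the PMD above set NIX_RX_OFFLOAD_MARK_UPDATE_F/NIX_RX_OFFLOAD_VLAN_STRIP_F state as needed and reselect the Rx burst function. The helper name, queue index and match pattern are placeholders, not part of the patch.

	#include <rte_flow.h>

	/* Strip the VLAN tag of matching packets and steer them to queue 0.
	 * The OF_POP_VLAN action is what triggers the VLAN-strip fast path
	 * selected in cn10k_flow_create() above.
	 */
	static int
	install_vlan_strip_rule(uint16_t port_id)
	{
		struct rte_flow_error err;
		const struct rte_flow_attr attr = { .ingress = 1 };
		const struct rte_flow_item pattern[] = {
			{ .type = RTE_FLOW_ITEM_TYPE_ETH },
			{ .type = RTE_FLOW_ITEM_TYPE_VLAN },
			{ .type = RTE_FLOW_ITEM_TYPE_END },
		};
		const struct rte_flow_action_queue queue = { .index = 0 };
		const struct rte_flow_action actions[] = {
			{ .type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN },
			{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
			{ .type = RTE_FLOW_ACTION_TYPE_END },
		};

		return rte_flow_create(port_id, &attr, pattern, actions, &err) ? 0 : -1;
	}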
struct rte_flow_error *error)
{
struct roc_npc_flow *flow = (struct roc_npc_flow *)rte_flow;
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ int mark_actions = 0, vtag_actions = 0;
+ struct roc_npc *npc = &dev->npc;
+
+ mark_actions = roc_npc_mark_actions_get(npc);
+ if (mark_actions) {
+ mark_actions = roc_npc_mark_actions_sub_return(npc, 1);
+ if (mark_actions == 0) {
+ dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_MARK_UPDATE_F;
+ cn10k_eth_set_rx_function(eth_dev);
+ }
+ }
+
+ vtag_actions = roc_npc_vtag_actions_get(npc);
+ if (vtag_actions) {
+ if (flow->nix_intf == ROC_NPC_INTF_RX) {
+ vtag_actions = roc_npc_vtag_actions_sub_return(npc, 1);
+ if (vtag_actions == 0) {
+ dev->rx_offload_flags &=
+ ~NIX_RX_OFFLOAD_VLAN_STRIP_F;
+ cn10k_eth_set_rx_function(eth_dev);
+ }
+ }
+ }
return cnxk_flow_destroy(eth_dev, flow, error);
}
#include "cn10k_ethdev.h"
#include "cn10k_rx.h"
-#define R(name, f4, f3, f2, f1, f0, flags) \
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_##name( \
void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts) \
{ \
static inline void
pick_rx_func(struct rte_eth_dev *eth_dev,
- const eth_rx_burst_t rx_burst[2][2][2][2][2])
+ const eth_rx_burst_t rx_burst[2][2][2][2][2][2])
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
- /* [TSP] [MARK] [CKSUM] [PTYPE] [RSS] */
+ /* [VLAN] [TSP] [MARK] [CKSUM] [PTYPE] [RSS] */
eth_dev->rx_pkt_burst = rx_burst
+ [!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
[!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_TSTAMP_F)]
[!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
[!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_CHECKSUM_F)]
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
- const eth_rx_burst_t nix_eth_rx_burst[2][2][2][2][2] = {
-#define R(name, f4, f3, f2, f1, f0, flags) \
- [f4][f3][f2][f1][f0] = cn10k_nix_recv_pkts_##name,
+ const eth_rx_burst_t nix_eth_rx_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn10k_nix_recv_pkts_##name,
NIX_RX_FASTPATH_MODES
#undef R
};
- const eth_rx_burst_t nix_eth_rx_burst_mseg[2][2][2][2][2] = {
-#define R(name, f4, f3, f2, f1, f0, flags) \
- [f4][f3][f2][f1][f0] = cn10k_nix_recv_pkts_mseg_##name,
+ const eth_rx_burst_t nix_eth_rx_burst_mseg[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn10k_nix_recv_pkts_mseg_##name,
NIX_RX_FASTPATH_MODES
#undef R
};
- const eth_rx_burst_t nix_eth_rx_vec_burst[2][2][2][2][2] = {
-#define R(name, f4, f3, f2, f1, f0, flags) \
- [f4][f3][f2][f1][f0] = cn10k_nix_recv_pkts_vec_##name,
+ const eth_rx_burst_t nix_eth_rx_vec_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn10k_nix_recv_pkts_vec_##name,
NIX_RX_FASTPATH_MODES
#undef R
/* Copy multi seg version with no offload for tear down sequence */
if (rte_eal_process_type() == RTE_PROC_PRIMARY)
dev->rx_pkt_burst_no_offload =
- nix_eth_rx_burst_mseg[0][0][0][0][0];
+ nix_eth_rx_burst_mseg[0][0][0][0][0][0];
rte_mb();
}
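The Rx burst lookup tables above gained a sixth dimension for VLAN stripping. A minimal sketch of how a flag word maps onto the [VLAN][TSP][MARK][CKSUM][PTYPE][RSS] indices, assuming the NIX_RX_OFFLOAD_*_F definitions from cn10k_rx.h; the helper itself is hypothetical and only restates the indexing used by pick_rx_func():

	static inline eth_rx_burst_t
	nix_lookup_rx_burst(const eth_rx_burst_t tbl[2][2][2][2][2][2],
			    uint16_t flags)
	{
		return tbl[!!(flags & NIX_RX_OFFLOAD_VLAN_STRIP_F)]  /* f5 */
			  [!!(flags & NIX_RX_OFFLOAD_TSTAMP_F)]      /* f4 */
			  [!!(flags & NIX_RX_OFFLOAD_MARK_UPDATE_F)] /* f3 */
			  [!!(flags & NIX_RX_OFFLOAD_CHECKSUM_F)]    /* f2 */
			  [!!(flags & NIX_RX_OFFLOAD_PTYPE_F)]       /* f1 */
			  [!!(flags & NIX_RX_OFFLOAD_RSS_F)];        /* f0 */
	}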
#define NIX_RX_OFFLOAD_CHECKSUM_F BIT(2)
#define NIX_RX_OFFLOAD_MARK_UPDATE_F BIT(3)
#define NIX_RX_OFFLOAD_TSTAMP_F BIT(4)
+#define NIX_RX_OFFLOAD_VLAN_STRIP_F BIT(5)
/* Flags to control cqe_to_mbuf conversion function.
* Defining it from backwards to denote it's been
if (flag & NIX_RX_OFFLOAD_CHECKSUM_F)
ol_flags |= nix_rx_olflags_get(lookup_mem, w1);
+ if (flag & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
+ if (rx->vtag0_gone) {
+ ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mbuf->vlan_tci = rx->vtag0_tci;
+ }
+ if (rx->vtag1_gone) {
+ ol_flags |= PKT_RX_QINQ | PKT_RX_QINQ_STRIPPED;
+ mbuf->vlan_tci_outer = rx->vtag1_tci;
+ }
+ }
+
if (flag & NIX_RX_OFFLOAD_MARK_UPDATE_F)
ol_flags = nix_update_match_id(rx->match_id, ol_flags, mbuf);
#if defined(RTE_ARCH_ARM64)
+static __rte_always_inline uint64_t
+nix_vlan_update(const uint64_t w2, uint64_t ol_flags, uint8x16_t *f)
+{
+ if (w2 & BIT_ULL(21) /* vtag0_gone */) {
+ ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ *f = vsetq_lane_u16((uint16_t)(w2 >> 32), *f, 5);
+ }
+
+ return ol_flags;
+}
+
+static __rte_always_inline uint64_t
+nix_qinq_update(const uint64_t w2, uint64_t ol_flags, struct rte_mbuf *mbuf)
+{
+ if (w2 & BIT_ULL(23) /* vtag1_gone */) {
+ ol_flags |= PKT_RX_QINQ | PKT_RX_QINQ_STRIPPED;
+ mbuf->vlan_tci_outer = (uint16_t)(w2 >> 48);
+ }
+
+ return ol_flags;
+}
+
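A scalar restatement of the two NEON helpers above, shown only to make the CQE word-2 layout explicit (gone bit 21 and TCI bits 32-47 for the inner tag, gone bit 23 and TCI bits 48-63 for the outer tag). The function is illustrative and not part of the driver:

	static inline uint64_t
	nix_vtag_w2_to_mbuf(uint64_t w2, uint64_t ol_flags, struct rte_mbuf *mbuf)
	{
		if (w2 & BIT_ULL(21)) { /* vtag0_gone: inner tag stripped */
			ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
			mbuf->vlan_tci = (uint16_t)(w2 >> 32);
		}
		if (w2 & BIT_ULL(23)) { /* vtag1_gone: outer tag stripped */
			ol_flags |= PKT_RX_QINQ | PKT_RX_QINQ_STRIPPED;
			mbuf->vlan_tci_outer = (uint16_t)(w2 >> 48);
		}
		return ol_flags;
	}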
static __rte_always_inline uint16_t
cn10k_nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t pkts, const uint16_t flags)
ol_flags3 |= nix_rx_olflags_get(lookup_mem, cq3_w1);
}
+ if (flags & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
+ uint64_t cq0_w2 = *(uint64_t *)(cq0 + CQE_SZ(0) + 16);
+ uint64_t cq1_w2 = *(uint64_t *)(cq0 + CQE_SZ(1) + 16);
+ uint64_t cq2_w2 = *(uint64_t *)(cq0 + CQE_SZ(2) + 16);
+ uint64_t cq3_w2 = *(uint64_t *)(cq0 + CQE_SZ(3) + 16);
+
+ ol_flags0 = nix_vlan_update(cq0_w2, ol_flags0, &f0);
+ ol_flags1 = nix_vlan_update(cq1_w2, ol_flags1, &f1);
+ ol_flags2 = nix_vlan_update(cq2_w2, ol_flags2, &f2);
+ ol_flags3 = nix_vlan_update(cq3_w2, ol_flags3, &f3);
+
+ ol_flags0 = nix_qinq_update(cq0_w2, ol_flags0, mbuf0);
+ ol_flags1 = nix_qinq_update(cq1_w2, ol_flags1, mbuf1);
+ ol_flags2 = nix_qinq_update(cq2_w2, ol_flags2, mbuf2);
+ ol_flags3 = nix_qinq_update(cq3_w2, ol_flags3, mbuf3);
+ }
+
if (flags & NIX_RX_OFFLOAD_MARK_UPDATE_F) {
ol_flags0 = nix_update_match_id(
*(uint16_t *)(cq0 + CQE_SZ(0) + 38), ol_flags0,
#define CKSUM_F NIX_RX_OFFLOAD_CHECKSUM_F
#define MARK_F NIX_RX_OFFLOAD_MARK_UPDATE_F
#define TS_F NIX_RX_OFFLOAD_TSTAMP_F
+#define RX_VLAN_F NIX_RX_OFFLOAD_VLAN_STRIP_F
-/* [TS] [MARK] [CKSUM] [PTYPE] [RSS] */
+/* [RX_VLAN_F] [TS] [MARK] [CKSUM] [PTYPE] [RSS] */
#define NIX_RX_FASTPATH_MODES \
-R(no_offload, 0, 0, 0, 0, 0, NIX_RX_OFFLOAD_NONE) \
-R(rss, 0, 0, 0, 0, 1, RSS_F) \
-R(ptype, 0, 0, 0, 1, 0, PTYPE_F) \
-R(ptype_rss, 0, 0, 0, 1, 1, PTYPE_F | RSS_F) \
-R(cksum, 0, 0, 1, 0, 0, CKSUM_F) \
-R(cksum_rss, 0, 0, 1, 0, 1, CKSUM_F | RSS_F) \
-R(cksum_ptype, 0, 0, 1, 1, 0, CKSUM_F | PTYPE_F) \
-R(cksum_ptype_rss, 0, 0, 1, 1, 1, CKSUM_F | PTYPE_F | RSS_F) \
-R(mark, 0, 1, 0, 0, 0, MARK_F) \
-R(mark_rss, 0, 1, 0, 0, 1, MARK_F | RSS_F) \
-R(mark_ptype, 0, 1, 0, 1, 0, MARK_F | PTYPE_F) \
-R(mark_ptype_rss, 0, 1, 0, 1, 1, MARK_F | PTYPE_F | RSS_F) \
-R(mark_cksum, 0, 1, 1, 0, 0, MARK_F | CKSUM_F) \
-R(mark_cksum_rss, 0, 1, 1, 0, 1, MARK_F | CKSUM_F | RSS_F) \
-R(mark_cksum_ptype, 0, 1, 1, 1, 0, MARK_F | CKSUM_F | PTYPE_F) \
-R(mark_cksum_ptype_rss, 0, 1, 1, 1, 1, MARK_F | CKSUM_F | PTYPE_F | RSS_F)\
-R(ts, 1, 0, 0, 0, 0, TS_F) \
-R(ts_rss, 1, 0, 0, 0, 1, TS_F | RSS_F) \
-R(ts_ptype, 1, 0, 0, 1, 0, TS_F | PTYPE_F) \
-R(ts_ptype_rss, 1, 0, 0, 1, 1, TS_F | PTYPE_F | RSS_F) \
-R(ts_cksum, 1, 0, 1, 0, 0, TS_F | CKSUM_F) \
-R(ts_cksum_rss, 1, 0, 1, 0, 1, TS_F | CKSUM_F | RSS_F) \
-R(ts_cksum_ptype, 1, 0, 1, 1, 0, TS_F | CKSUM_F | PTYPE_F) \
-R(ts_cksum_ptype_rss, 1, 0, 1, 1, 1, TS_F | CKSUM_F | PTYPE_F | RSS_F)\
-R(ts_mark, 1, 1, 0, 0, 0, TS_F | MARK_F) \
-R(ts_mark_rss, 1, 1, 0, 0, 1, TS_F | MARK_F | RSS_F) \
-R(ts_mark_ptype, 1, 1, 0, 1, 0, TS_F | MARK_F | PTYPE_F) \
-R(ts_mark_ptype_rss, 1, 1, 0, 1, 1, TS_F | MARK_F | PTYPE_F | RSS_F)\
-R(ts_mark_cksum, 1, 1, 1, 0, 0, TS_F | MARK_F | CKSUM_F) \
-R(ts_mark_cksum_rss, 1, 1, 1, 0, 1, TS_F | MARK_F | CKSUM_F | RSS_F)\
-R(ts_mark_cksum_ptype, 1, 1, 1, 1, 0, TS_F | MARK_F | CKSUM_F | PTYPE_F)\
-R(ts_mark_cksum_ptype_rss, 1, 1, 1, 1, 1, TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)
-
-#define R(name, f4, f3, f2, f1, f0, flags) \
+R(no_offload, 0, 0, 0, 0, 0, 0, NIX_RX_OFFLOAD_NONE) \
+R(rss, 0, 0, 0, 0, 0, 1, RSS_F) \
+R(ptype, 0, 0, 0, 0, 1, 0, PTYPE_F) \
+R(ptype_rss, 0, 0, 0, 0, 1, 1, PTYPE_F | RSS_F) \
+R(cksum, 0, 0, 0, 1, 0, 0, CKSUM_F) \
+R(cksum_rss, 0, 0, 0, 1, 0, 1, CKSUM_F | RSS_F) \
+R(cksum_ptype, 0, 0, 0, 1, 1, 0, CKSUM_F | PTYPE_F) \
+R(cksum_ptype_rss, 0, 0, 0, 1, 1, 1, CKSUM_F | PTYPE_F | RSS_F) \
+R(mark, 0, 0, 1, 0, 0, 0, MARK_F) \
+R(mark_rss, 0, 0, 1, 0, 0, 1, MARK_F | RSS_F) \
+R(mark_ptype, 0, 0, 1, 0, 1, 0, MARK_F | PTYPE_F) \
+R(mark_ptype_rss, 0, 0, 1, 0, 1, 1, MARK_F | PTYPE_F | RSS_F) \
+R(mark_cksum, 0, 0, 1, 1, 0, 0, MARK_F | CKSUM_F) \
+R(mark_cksum_rss, 0, 0, 1, 1, 0, 1, MARK_F | CKSUM_F | RSS_F) \
+R(mark_cksum_ptype, 0, 0, 1, 1, 1, 0, MARK_F | CKSUM_F | PTYPE_F) \
+R(mark_cksum_ptype_rss, 0, 0, 1, 1, 1, 1, \
+ MARK_F | CKSUM_F | PTYPE_F | RSS_F) \
+R(ts, 0, 1, 0, 0, 0, 0, TS_F) \
+R(ts_rss, 0, 1, 0, 0, 0, 1, TS_F | RSS_F) \
+R(ts_ptype, 0, 1, 0, 0, 1, 0, TS_F | PTYPE_F) \
+R(ts_ptype_rss, 0, 1, 0, 0, 1, 1, TS_F | PTYPE_F | RSS_F) \
+R(ts_cksum, 0, 1, 0, 1, 0, 0, TS_F | CKSUM_F) \
+R(ts_cksum_rss, 0, 1, 0, 1, 0, 1, TS_F | CKSUM_F | RSS_F) \
+R(ts_cksum_ptype, 0, 1, 0, 1, 1, 0, TS_F | CKSUM_F | PTYPE_F) \
+R(ts_cksum_ptype_rss, 0, 1, 0, 1, 1, 1, \
+ TS_F | CKSUM_F | PTYPE_F | RSS_F) \
+R(ts_mark, 0, 1, 1, 0, 0, 0, TS_F | MARK_F) \
+R(ts_mark_rss, 0, 1, 1, 0, 0, 1, TS_F | MARK_F | RSS_F) \
+R(ts_mark_ptype, 0, 1, 1, 0, 1, 0, TS_F | MARK_F | PTYPE_F) \
+R(ts_mark_ptype_rss, 0, 1, 1, 0, 1, 1, \
+ TS_F | MARK_F | PTYPE_F | RSS_F) \
+R(ts_mark_cksum, 0, 1, 1, 1, 0, 0, TS_F | MARK_F | CKSUM_F) \
+R(ts_mark_cksum_rss, 0, 1, 1, 1, 0, 1, \
+ TS_F | MARK_F | CKSUM_F | RSS_F) \
+R(ts_mark_cksum_ptype, 0, 1, 1, 1, 1, 0, \
+ TS_F | MARK_F | CKSUM_F | PTYPE_F) \
+R(ts_mark_cksum_ptype_rss, 0, 1, 1, 1, 1, 1, \
+ TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F) \
+R(vlan, 1, 0, 0, 0, 0, 0, RX_VLAN_F) \
+R(vlan_rss, 1, 0, 0, 0, 0, 1, RX_VLAN_F | RSS_F) \
+R(vlan_ptype, 1, 0, 0, 0, 1, 0, RX_VLAN_F | PTYPE_F) \
+R(vlan_ptype_rss, 1, 0, 0, 0, 1, 1, RX_VLAN_F | PTYPE_F | RSS_F) \
+R(vlan_cksum, 1, 0, 0, 1, 0, 0, RX_VLAN_F | CKSUM_F) \
+R(vlan_cksum_rss, 1, 0, 0, 1, 0, 1, RX_VLAN_F | CKSUM_F | RSS_F) \
+R(vlan_cksum_ptype, 1, 0, 0, 1, 1, 0, \
+ RX_VLAN_F | CKSUM_F | PTYPE_F) \
+R(vlan_cksum_ptype_rss, 1, 0, 0, 1, 1, 1, \
+ RX_VLAN_F | CKSUM_F | PTYPE_F | RSS_F) \
+R(vlan_mark, 1, 0, 1, 0, 0, 0, RX_VLAN_F | MARK_F) \
+R(vlan_mark_rss, 1, 0, 1, 0, 0, 1, RX_VLAN_F | MARK_F | RSS_F) \
+R(vlan_mark_ptype, 1, 0, 1, 0, 1, 0, RX_VLAN_F | MARK_F | PTYPE_F)\
+R(vlan_mark_ptype_rss, 1, 0, 1, 0, 1, 1, \
+ RX_VLAN_F | MARK_F | PTYPE_F | RSS_F) \
+R(vlan_mark_cksum, 1, 0, 1, 1, 0, 0, RX_VLAN_F | MARK_F | CKSUM_F)\
+R(vlan_mark_cksum_rss, 1, 0, 1, 1, 0, 1, \
+ RX_VLAN_F | MARK_F | CKSUM_F | RSS_F) \
+R(vlan_mark_cksum_ptype, 1, 0, 1, 1, 1, 0, \
+ RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F) \
+R(vlan_mark_cksum_ptype_rss, 1, 0, 1, 1, 1, 1, \
+ RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F) \
+R(vlan_ts, 1, 1, 0, 0, 0, 0, RX_VLAN_F | TS_F) \
+R(vlan_ts_rss, 1, 1, 0, 0, 0, 1, RX_VLAN_F | TS_F | RSS_F) \
+R(vlan_ts_ptype, 1, 1, 0, 0, 1, 0, RX_VLAN_F | TS_F | PTYPE_F) \
+R(vlan_ts_ptype_rss, 1, 1, 0, 0, 1, 1, \
+ RX_VLAN_F | TS_F | PTYPE_F | RSS_F) \
+R(vlan_ts_cksum, 1, 1, 0, 1, 0, 0, RX_VLAN_F | TS_F | CKSUM_F) \
+R(vlan_ts_cksum_rss, 1, 1, 0, 1, 0, 1, \
+ RX_VLAN_F | TS_F | CKSUM_F | RSS_F) \
+R(vlan_ts_cksum_ptype, 1, 1, 0, 1, 1, 0, \
+ RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F) \
+R(vlan_ts_cksum_ptype_rss, 1, 1, 0, 1, 1, 1, \
+ RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F | RSS_F) \
+R(vlan_ts_mark, 1, 1, 1, 0, 0, 0, RX_VLAN_F | TS_F | MARK_F) \
+R(vlan_ts_mark_rss, 1, 1, 1, 0, 0, 1, \
+ RX_VLAN_F | TS_F | MARK_F | RSS_F) \
+R(vlan_ts_mark_ptype, 1, 1, 1, 0, 1, 0, \
+ RX_VLAN_F | TS_F | MARK_F | PTYPE_F) \
+R(vlan_ts_mark_ptype_rss, 1, 1, 1, 0, 1, 1, \
+ RX_VLAN_F | TS_F | MARK_F | PTYPE_F | RSS_F) \
+R(vlan_ts_mark_cksum, 1, 1, 1, 1, 0, 0, \
+ RX_VLAN_F | TS_F | MARK_F | CKSUM_F) \
+R(vlan_ts_mark_cksum_rss, 1, 1, 1, 1, 0, 1, \
+ RX_VLAN_F | TS_F | MARK_F | CKSUM_F | RSS_F) \
+R(vlan_ts_mark_cksum_ptype, 1, 1, 1, 1, 1, 0, \
+ RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F) \
+R(vlan_ts_mark_cksum_ptype_rss, 1, 1, 1, 1, 1, 1, \
+ RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)
+
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_##name( \
void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts); \
\
#include "cn10k_ethdev.h"
#include "cn10k_rx.h"
-#define R(name, f4, f3, f2, f1, f0, flags) \
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_mseg_##name( \
void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts) \
{ \
#include "cn10k_ethdev.h"
#include "cn10k_rx.h"
-#define R(name, f4, f3, f2, f1, f0, flags) \
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
uint16_t __rte_noinline __rte_hot \
cn10k_nix_recv_pkts_vec_##name(void *rx_queue, \
struct rte_mbuf **rx_pkts, \
#include <cnxk_rte_flow.h>
#include "cn9k_ethdev.h"
#include "cn9k_rte_flow.h"
+#include "cn9k_rx.h"
struct rte_flow *
cn9k_flow_create(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ int mark_actions = 0, vtag_actions = 0;
+ struct roc_npc *npc = &dev->npc;
struct roc_npc_flow *flow;
flow = cnxk_flow_create(eth_dev, attr, pattern, actions, error);
if (!flow)
return NULL;
+ mark_actions = roc_npc_mark_actions_get(npc);
+
+ if (mark_actions) {
+ dev->rx_offload_flags |= NIX_RX_OFFLOAD_MARK_UPDATE_F;
+ cn9k_eth_set_rx_function(eth_dev);
+ }
+
+ vtag_actions = roc_npc_vtag_actions_get(npc);
+
+ if (vtag_actions) {
+ dev->rx_offload_flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
+ cn9k_eth_set_rx_function(eth_dev);
+ }
+
return (struct rte_flow *)flow;
}
struct rte_flow_error *error)
{
struct roc_npc_flow *flow = (struct roc_npc_flow *)rte_flow;
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ int mark_actions = 0, vtag_actions = 0;
+ struct roc_npc *npc = &dev->npc;
+
+ mark_actions = roc_npc_mark_actions_get(npc);
+ if (mark_actions) {
+ mark_actions = roc_npc_mark_actions_sub_return(npc, 1);
+ if (mark_actions == 0) {
+ dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_MARK_UPDATE_F;
+ cn9k_eth_set_rx_function(eth_dev);
+ }
+ }
+
+ vtag_actions = roc_npc_vtag_actions_get(npc);
+ if (vtag_actions) {
+ if (flow->nix_intf == ROC_NPC_INTF_RX) {
+ vtag_actions = roc_npc_vtag_actions_sub_return(npc, 1);
+ if (vtag_actions == 0) {
+ dev->rx_offload_flags &=
+ ~NIX_RX_OFFLOAD_VLAN_STRIP_F;
+ cn9k_eth_set_rx_function(eth_dev);
+ }
+ }
+ }
return cnxk_flow_destroy(eth_dev, flow, error);
}
#include "cn9k_ethdev.h"
#include "cn9k_rx.h"
-#define R(name, f4, f3, f2, f1, f0, flags) \
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_##name( \
void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts) \
{ \
static inline void
pick_rx_func(struct rte_eth_dev *eth_dev,
- const eth_rx_burst_t rx_burst[2][2][2][2][2])
+ const eth_rx_burst_t rx_burst[2][2][2][2][2][2])
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
- /* [TSP] [MARK] [CKSUM] [PTYPE] [RSS] */
+ /* [VLAN] [TSP] [MARK] [CKSUM] [PTYPE] [RSS] */
eth_dev->rx_pkt_burst = rx_burst
+ [!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
[!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_TSTAMP_F)]
[!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
[!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_CHECKSUM_F)]
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
- const eth_rx_burst_t nix_eth_rx_burst[2][2][2][2][2] = {
-#define R(name, f4, f3, f2, f1, f0, flags) \
- [f4][f3][f2][f1][f0] = cn9k_nix_recv_pkts_##name,
+ const eth_rx_burst_t nix_eth_rx_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_nix_recv_pkts_##name,
NIX_RX_FASTPATH_MODES
#undef R
};
- const eth_rx_burst_t nix_eth_rx_burst_mseg[2][2][2][2][2] = {
-#define R(name, f4, f3, f2, f1, f0, flags) \
- [f4][f3][f2][f1][f0] = cn9k_nix_recv_pkts_mseg_##name,
+ const eth_rx_burst_t nix_eth_rx_burst_mseg[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_nix_recv_pkts_mseg_##name,
NIX_RX_FASTPATH_MODES
#undef R
};
- const eth_rx_burst_t nix_eth_rx_vec_burst[2][2][2][2][2] = {
-#define R(name, f4, f3, f2, f1, f0, flags) \
- [f4][f3][f2][f1][f0] = cn9k_nix_recv_pkts_vec_##name,
+ const eth_rx_burst_t nix_eth_rx_vec_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_nix_recv_pkts_vec_##name,
NIX_RX_FASTPATH_MODES
#undef R
/* Copy multi seg version with no offload for tear down sequence */
if (rte_eal_process_type() == RTE_PROC_PRIMARY)
dev->rx_pkt_burst_no_offload =
- nix_eth_rx_burst_mseg[0][0][0][0][0];
+ nix_eth_rx_burst_mseg[0][0][0][0][0][0];
rte_mb();
}
#define NIX_RX_OFFLOAD_CHECKSUM_F BIT(2)
#define NIX_RX_OFFLOAD_MARK_UPDATE_F BIT(3)
#define NIX_RX_OFFLOAD_TSTAMP_F BIT(4)
+#define NIX_RX_OFFLOAD_VLAN_STRIP_F BIT(5)
/* Flags to control cqe_to_mbuf conversion function.
* Defining it from backwards to denote it's been
if (flag & NIX_RX_OFFLOAD_CHECKSUM_F)
ol_flags |= nix_rx_olflags_get(lookup_mem, w1);
+ if (flag & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
+ if (rx->cn9k.vtag0_gone) {
+ ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mbuf->vlan_tci = rx->cn9k.vtag0_tci;
+ }
+ if (rx->cn9k.vtag1_gone) {
+ ol_flags |= PKT_RX_QINQ | PKT_RX_QINQ_STRIPPED;
+ mbuf->vlan_tci_outer = rx->cn9k.vtag1_tci;
+ }
+ }
+
if (flag & NIX_RX_OFFLOAD_MARK_UPDATE_F)
ol_flags =
nix_update_match_id(rx->cn9k.match_id, ol_flags, mbuf);
#if defined(RTE_ARCH_ARM64)
+static __rte_always_inline uint64_t
+nix_vlan_update(const uint64_t w2, uint64_t ol_flags, uint8x16_t *f)
+{
+ if (w2 & BIT_ULL(21) /* vtag0_gone */) {
+ ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ *f = vsetq_lane_u16((uint16_t)(w2 >> 32), *f, 5);
+ }
+
+ return ol_flags;
+}
+
+static __rte_always_inline uint64_t
+nix_qinq_update(const uint64_t w2, uint64_t ol_flags, struct rte_mbuf *mbuf)
+{
+ if (w2 & BIT_ULL(23) /* vtag1_gone */) {
+ ol_flags |= PKT_RX_QINQ | PKT_RX_QINQ_STRIPPED;
+ mbuf->vlan_tci_outer = (uint16_t)(w2 >> 48);
+ }
+
+ return ol_flags;
+}
+
static __rte_always_inline uint16_t
cn9k_nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t pkts, const uint16_t flags)
ol_flags3 |= nix_rx_olflags_get(lookup_mem, cq3_w1);
}
+ if (flags & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
+ uint64_t cq0_w2 = *(uint64_t *)(cq0 + CQE_SZ(0) + 16);
+ uint64_t cq1_w2 = *(uint64_t *)(cq0 + CQE_SZ(1) + 16);
+ uint64_t cq2_w2 = *(uint64_t *)(cq0 + CQE_SZ(2) + 16);
+ uint64_t cq3_w2 = *(uint64_t *)(cq0 + CQE_SZ(3) + 16);
+
+ ol_flags0 = nix_vlan_update(cq0_w2, ol_flags0, &f0);
+ ol_flags1 = nix_vlan_update(cq1_w2, ol_flags1, &f1);
+ ol_flags2 = nix_vlan_update(cq2_w2, ol_flags2, &f2);
+ ol_flags3 = nix_vlan_update(cq3_w2, ol_flags3, &f3);
+
+ ol_flags0 = nix_qinq_update(cq0_w2, ol_flags0, mbuf0);
+ ol_flags1 = nix_qinq_update(cq1_w2, ol_flags1, mbuf1);
+ ol_flags2 = nix_qinq_update(cq2_w2, ol_flags2, mbuf2);
+ ol_flags3 = nix_qinq_update(cq3_w2, ol_flags3, mbuf3);
+ }
+
if (flags & NIX_RX_OFFLOAD_MARK_UPDATE_F) {
ol_flags0 = nix_update_match_id(
*(uint16_t *)(cq0 + CQE_SZ(0) + 38), ol_flags0,
#define CKSUM_F NIX_RX_OFFLOAD_CHECKSUM_F
#define MARK_F NIX_RX_OFFLOAD_MARK_UPDATE_F
#define TS_F NIX_RX_OFFLOAD_TSTAMP_F
+#define RX_VLAN_F NIX_RX_OFFLOAD_VLAN_STRIP_F
-/* [TS] [MARK] [CKSUM] [PTYPE] [RSS] */
+/* [RX_VLAN_F] [TS] [MARK] [CKSUM] [PTYPE] [RSS] */
#define NIX_RX_FASTPATH_MODES \
-R(no_offload, 0, 0, 0, 0, 0, NIX_RX_OFFLOAD_NONE) \
-R(rss, 0, 0, 0, 0, 1, RSS_F) \
-R(ptype, 0, 0, 0, 1, 0, PTYPE_F) \
-R(ptype_rss, 0, 0, 0, 1, 1, PTYPE_F | RSS_F) \
-R(cksum, 0, 0, 1, 0, 0, CKSUM_F) \
-R(cksum_rss, 0, 0, 1, 0, 1, CKSUM_F | RSS_F) \
-R(cksum_ptype, 0, 0, 1, 1, 0, CKSUM_F | PTYPE_F) \
-R(cksum_ptype_rss, 0, 0, 1, 1, 1, CKSUM_F | PTYPE_F | RSS_F) \
-R(mark, 0, 1, 0, 0, 0, MARK_F) \
-R(mark_rss, 0, 1, 0, 0, 1, MARK_F | RSS_F) \
-R(mark_ptype, 0, 1, 0, 1, 0, MARK_F | PTYPE_F) \
-R(mark_ptype_rss, 0, 1, 0, 1, 1, MARK_F | PTYPE_F | RSS_F) \
-R(mark_cksum, 0, 1, 1, 0, 0, MARK_F | CKSUM_F) \
-R(mark_cksum_rss, 0, 1, 1, 0, 1, MARK_F | CKSUM_F | RSS_F) \
-R(mark_cksum_ptype, 0, 1, 1, 1, 0, MARK_F | CKSUM_F | PTYPE_F) \
-R(mark_cksum_ptype_rss, 0, 1, 1, 1, 1, MARK_F | CKSUM_F | PTYPE_F | RSS_F)\
-R(ts, 1, 0, 0, 0, 0, TS_F) \
-R(ts_rss, 1, 0, 0, 0, 1, TS_F | RSS_F) \
-R(ts_ptype, 1, 0, 0, 1, 0, TS_F | PTYPE_F) \
-R(ts_ptype_rss, 1, 0, 0, 1, 1, TS_F | PTYPE_F | RSS_F) \
-R(ts_cksum, 1, 0, 1, 0, 0, TS_F | CKSUM_F) \
-R(ts_cksum_rss, 1, 0, 1, 0, 1, TS_F | CKSUM_F | RSS_F) \
-R(ts_cksum_ptype, 1, 0, 1, 1, 0, TS_F | CKSUM_F | PTYPE_F) \
-R(ts_cksum_ptype_rss, 1, 0, 1, 1, 1, TS_F | CKSUM_F | PTYPE_F | RSS_F)\
-R(ts_mark, 1, 1, 0, 0, 0, TS_F | MARK_F) \
-R(ts_mark_rss, 1, 1, 0, 0, 1, TS_F | MARK_F | RSS_F) \
-R(ts_mark_ptype, 1, 1, 0, 1, 0, TS_F | MARK_F | PTYPE_F) \
-R(ts_mark_ptype_rss, 1, 1, 0, 1, 1, TS_F | MARK_F | PTYPE_F | RSS_F)\
-R(ts_mark_cksum, 1, 1, 1, 0, 0, TS_F | MARK_F | CKSUM_F) \
-R(ts_mark_cksum_rss, 1, 1, 1, 0, 1, TS_F | MARK_F | CKSUM_F | RSS_F)\
-R(ts_mark_cksum_ptype, 1, 1, 1, 1, 0, TS_F | MARK_F | CKSUM_F | PTYPE_F)\
-R(ts_mark_cksum_ptype_rss, 1, 1, 1, 1, 1, TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)
-
-#define R(name, f4, f3, f2, f1, f0, flags) \
+R(no_offload, 0, 0, 0, 0, 0, 0, NIX_RX_OFFLOAD_NONE) \
+R(rss, 0, 0, 0, 0, 0, 1, RSS_F) \
+R(ptype, 0, 0, 0, 0, 1, 0, PTYPE_F) \
+R(ptype_rss, 0, 0, 0, 0, 1, 1, PTYPE_F | RSS_F) \
+R(cksum, 0, 0, 0, 1, 0, 0, CKSUM_F) \
+R(cksum_rss, 0, 0, 0, 1, 0, 1, CKSUM_F | RSS_F) \
+R(cksum_ptype, 0, 0, 0, 1, 1, 0, CKSUM_F | PTYPE_F) \
+R(cksum_ptype_rss, 0, 0, 0, 1, 1, 1, CKSUM_F | PTYPE_F | RSS_F) \
+R(mark, 0, 0, 1, 0, 0, 0, MARK_F) \
+R(mark_rss, 0, 0, 1, 0, 0, 1, MARK_F | RSS_F) \
+R(mark_ptype, 0, 0, 1, 0, 1, 0, MARK_F | PTYPE_F) \
+R(mark_ptype_rss, 0, 0, 1, 0, 1, 1, MARK_F | PTYPE_F | RSS_F) \
+R(mark_cksum, 0, 0, 1, 1, 0, 0, MARK_F | CKSUM_F) \
+R(mark_cksum_rss, 0, 0, 1, 1, 0, 1, MARK_F | CKSUM_F | RSS_F) \
+R(mark_cksum_ptype, 0, 0, 1, 1, 1, 0, MARK_F | CKSUM_F | PTYPE_F) \
+R(mark_cksum_ptype_rss, 0, 0, 1, 1, 1, 1, \
+ MARK_F | CKSUM_F | PTYPE_F | RSS_F) \
+R(ts, 0, 1, 0, 0, 0, 0, TS_F) \
+R(ts_rss, 0, 1, 0, 0, 0, 1, TS_F | RSS_F) \
+R(ts_ptype, 0, 1, 0, 0, 1, 0, TS_F | PTYPE_F) \
+R(ts_ptype_rss, 0, 1, 0, 0, 1, 1, TS_F | PTYPE_F | RSS_F) \
+R(ts_cksum, 0, 1, 0, 1, 0, 0, TS_F | CKSUM_F) \
+R(ts_cksum_rss, 0, 1, 0, 1, 0, 1, TS_F | CKSUM_F | RSS_F) \
+R(ts_cksum_ptype, 0, 1, 0, 1, 1, 0, TS_F | CKSUM_F | PTYPE_F) \
+R(ts_cksum_ptype_rss, 0, 1, 0, 1, 1, 1, \
+ TS_F | CKSUM_F | PTYPE_F | RSS_F) \
+R(ts_mark, 0, 1, 1, 0, 0, 0, TS_F | MARK_F) \
+R(ts_mark_rss, 0, 1, 1, 0, 0, 1, TS_F | MARK_F | RSS_F) \
+R(ts_mark_ptype, 0, 1, 1, 0, 1, 0, TS_F | MARK_F | PTYPE_F) \
+R(ts_mark_ptype_rss, 0, 1, 1, 0, 1, 1, \
+ TS_F | MARK_F | PTYPE_F | RSS_F) \
+R(ts_mark_cksum, 0, 1, 1, 1, 0, 0, TS_F | MARK_F | CKSUM_F) \
+R(ts_mark_cksum_rss, 0, 1, 1, 1, 0, 1, \
+ TS_F | MARK_F | CKSUM_F | RSS_F) \
+R(ts_mark_cksum_ptype, 0, 1, 1, 1, 1, 0, \
+ TS_F | MARK_F | CKSUM_F | PTYPE_F) \
+R(ts_mark_cksum_ptype_rss, 0, 1, 1, 1, 1, 1, \
+ TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F) \
+R(vlan, 1, 0, 0, 0, 0, 0, RX_VLAN_F) \
+R(vlan_rss, 1, 0, 0, 0, 0, 1, RX_VLAN_F | RSS_F) \
+R(vlan_ptype, 1, 0, 0, 0, 1, 0, RX_VLAN_F | PTYPE_F) \
+R(vlan_ptype_rss, 1, 0, 0, 0, 1, 1, RX_VLAN_F | PTYPE_F | RSS_F) \
+R(vlan_cksum, 1, 0, 0, 1, 0, 0, RX_VLAN_F | CKSUM_F) \
+R(vlan_cksum_rss, 1, 0, 0, 1, 0, 1, RX_VLAN_F | CKSUM_F | RSS_F) \
+R(vlan_cksum_ptype, 1, 0, 0, 1, 1, 0, \
+ RX_VLAN_F | CKSUM_F | PTYPE_F) \
+R(vlan_cksum_ptype_rss, 1, 0, 0, 1, 1, 1, \
+ RX_VLAN_F | CKSUM_F | PTYPE_F | RSS_F) \
+R(vlan_mark, 1, 0, 1, 0, 0, 0, RX_VLAN_F | MARK_F) \
+R(vlan_mark_rss, 1, 0, 1, 0, 0, 1, RX_VLAN_F | MARK_F | RSS_F) \
+R(vlan_mark_ptype, 1, 0, 1, 0, 1, 0, RX_VLAN_F | MARK_F | PTYPE_F)\
+R(vlan_mark_ptype_rss, 1, 0, 1, 0, 1, 1, \
+ RX_VLAN_F | MARK_F | PTYPE_F | RSS_F) \
+R(vlan_mark_cksum, 1, 0, 1, 1, 0, 0, RX_VLAN_F | MARK_F | CKSUM_F)\
+R(vlan_mark_cksum_rss, 1, 0, 1, 1, 0, 1, \
+ RX_VLAN_F | MARK_F | CKSUM_F | RSS_F) \
+R(vlan_mark_cksum_ptype, 1, 0, 1, 1, 1, 0, \
+ RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F) \
+R(vlan_mark_cksum_ptype_rss, 1, 0, 1, 1, 1, 1, \
+ RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F) \
+R(vlan_ts, 1, 1, 0, 0, 0, 0, RX_VLAN_F | TS_F) \
+R(vlan_ts_rss, 1, 1, 0, 0, 0, 1, RX_VLAN_F | TS_F | RSS_F) \
+R(vlan_ts_ptype, 1, 1, 0, 0, 1, 0, RX_VLAN_F | TS_F | PTYPE_F) \
+R(vlan_ts_ptype_rss, 1, 1, 0, 0, 1, 1, \
+ RX_VLAN_F | TS_F | PTYPE_F | RSS_F) \
+R(vlan_ts_cksum, 1, 1, 0, 1, 0, 0, RX_VLAN_F | TS_F | CKSUM_F) \
+R(vlan_ts_cksum_rss, 1, 1, 0, 1, 0, 1, \
+ RX_VLAN_F | TS_F | CKSUM_F | RSS_F) \
+R(vlan_ts_cksum_ptype, 1, 1, 0, 1, 1, 0, \
+ RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F) \
+R(vlan_ts_cksum_ptype_rss, 1, 1, 0, 1, 1, 1, \
+ RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F | RSS_F) \
+R(vlan_ts_mark, 1, 1, 1, 0, 0, 0, RX_VLAN_F | TS_F | MARK_F) \
+R(vlan_ts_mark_rss, 1, 1, 1, 0, 0, 1, \
+ RX_VLAN_F | TS_F | MARK_F | RSS_F) \
+R(vlan_ts_mark_ptype, 1, 1, 1, 0, 1, 0, \
+ RX_VLAN_F | TS_F | MARK_F | PTYPE_F) \
+R(vlan_ts_mark_ptype_rss, 1, 1, 1, 0, 1, 1, \
+ RX_VLAN_F | TS_F | MARK_F | PTYPE_F | RSS_F) \
+R(vlan_ts_mark_cksum, 1, 1, 1, 1, 0, 0, \
+ RX_VLAN_F | TS_F | MARK_F | CKSUM_F) \
+R(vlan_ts_mark_cksum_rss, 1, 1, 1, 1, 0, 1, \
+ RX_VLAN_F | TS_F | MARK_F | CKSUM_F | RSS_F) \
+R(vlan_ts_mark_cksum_ptype, 1, 1, 1, 1, 1, 0, \
+ RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F) \
+R(vlan_ts_mark_cksum_ptype_rss, 1, 1, 1, 1, 1, 1, \
+ RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)
+
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_##name( \
void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts); \
\
#include "cn9k_ethdev.h"
#include "cn9k_rx.h"
-#define R(name, f4, f3, f2, f1, f0, flags) \
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_mseg_##name( \
void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts) \
{ \
#include "cn9k_ethdev.h"
#include "cn9k_rx.h"
-#define R(name, f4, f3, f2, f1, f0, flags) \
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_vec_##name( \
void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts) \
{ \
/* Initialize base roc nix */
nix->pci_dev = pci_dev;
+ nix->hw_vlan_ins = true;
rc = roc_nix_dev_init(nix);
if (rc) {
plt_err("Failed to initialize roc nix rc=%d", rc);
(DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_SCTP_CKSUM | \
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_RX_OFFLOAD_SCATTER | \
DEV_RX_OFFLOAD_JUMBO_FRAME | DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | \
- DEV_RX_OFFLOAD_RSS_HASH | DEV_RX_OFFLOAD_TIMESTAMP)
+ DEV_RX_OFFLOAD_RSS_HASH | DEV_RX_OFFLOAD_TIMESTAMP | \
+ DEV_RX_OFFLOAD_VLAN_STRIP)
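Since DEV_RX_OFFLOAD_VLAN_STRIP is now part of the advertised Rx capability mask, an application can also request stripping port-wide at configure time instead of per flow. A hedged sketch; the queue counts and the rest of the configuration are placeholders:

	/* Port-wide alternative to the per-flow OF_POP_VLAN action. */
	static int
	configure_port_vlan_strip(uint16_t port_id)
	{
		struct rte_eth_conf conf = {
			.rxmode = { .offloads = DEV_RX_OFFLOAD_VLAN_STRIP, },
		};

		return rte_eth_dev_configure(port_id, 1 /* nb_rxq */,
					     1 /* nb_txq */, &conf);
	}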
#define RSS_IPV4_ENABLE \
(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP | \
case RTE_FLOW_ACTION_TYPE_SECURITY:
in_actions[i].type = ROC_NPC_ACTION_TYPE_SEC;
break;
+ case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
+ in_actions[i].type = ROC_NPC_ACTION_TYPE_VLAN_STRIP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+ in_actions[i].type = ROC_NPC_ACTION_TYPE_VLAN_INSERT;
+ in_actions[i].conf = actions->conf;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+ in_actions[i].type =
+ ROC_NPC_ACTION_TYPE_VLAN_ETHTYPE_INSERT;
+ in_actions[i].conf = actions->conf;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
+ in_actions[i].type =
+ ROC_NPC_ACTION_TYPE_VLAN_PCP_INSERT;
+ in_actions[i].conf = actions->conf;
+ break;
default:
plt_npc_dbg("Action is not supported = %d",
actions->type);
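For the egress side, the action mappings above let a flow rule push a VLAN header and set its VID and PCP through the OpenFlow-style rte_flow actions. A hedged sketch with placeholder values; the helper is hypothetical:

	static int
	install_vlan_insert_rule(uint16_t port_id)
	{
		struct rte_flow_error err;
		const struct rte_flow_attr attr = { .egress = 1 };
		const struct rte_flow_item pattern[] = {
			{ .type = RTE_FLOW_ITEM_TYPE_ETH },
			{ .type = RTE_FLOW_ITEM_TYPE_END },
		};
		const struct rte_flow_action_of_push_vlan push = {
			.ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN),
		};
		const struct rte_flow_action_of_set_vlan_vid vid = {
			.vlan_vid = rte_cpu_to_be_16(100),
		};
		const struct rte_flow_action_of_set_vlan_pcp pcp = {
			.vlan_pcp = 3,
		};
		const struct rte_flow_action actions[] = {
			{ .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push },
			{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid },
			{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP, .conf = &pcp },
			{ .type = RTE_FLOW_ACTION_TYPE_END },
		};

		return rte_flow_create(port_id, &attr, pattern, actions, &err) ? 0 : -1;
	}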