Patch adds VLAN filter offload support. MBOX messages for VLAN
filter enable/disable and VLAN filter entry add/remove are added
to configure PCAM entries that filter out VLAN traffic on a given
port.
Patch also defines an rx_offload_flag for VLAN filtering.
Signed-off-by: Vamsi Attunuru <vattunuru@marvell.com>
Acked-by: Harman Kalra <hkalra@marvell.com>
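
A minimal usage sketch, assuming an already-probed port (the helper
name, port id and VLAN id below are illustrative, not part of this
patch): it requests the Rx VLAN filter offload and adds one filter
entry through the generic ethdev API, which lands in the PMD's
.vlan_filter_set callback added here:

	#include <rte_ethdev.h>

	/* Hypothetical helper: enable the Rx VLAN filter offload and
	 * add a single VLAN id to the port's filter table.
	 */
	static int
	enable_vlan_filter(uint16_t port_id, uint16_t vlan_id)
	{
		struct rte_eth_conf conf = { 0 };
		int ret;

		/* Rx VLAN filter offload advertised by this patch. */
		conf.rxmode.offloads = DEV_RX_OFFLOAD_VLAN_FILTER;

		ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
		if (ret < 0)
			return ret;

		/* Maps to octeontx_dev_vlan_filter_set() in this PMD. */
		return rte_eth_dev_vlan_filter(port_id, vlan_id, 1);
	}
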
Scattered Rx = Y
Promiscuous mode = Y
Unicast MAC filter = Y
+VLAN filter = Y
+VLAN offload = P
CRC offload = Y
Packet type parsing = Y
Basic stats = Y
- Jumbo frames
- Scatter-Gather IO support
- Link state information
+- MAC/VLAN filtering
- MTU update
- SR-IOV VF
- Multiple queues for TX
SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += octeontx_pkivf.c
SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += octeontx_bgx.c
SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += octeontx_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += octeontx_ethdev_ops.c
ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
CFLAGS_octeontx_rxtx.o += -fno-prefetch-loop-arrays
return res;
}
+int
+octeontx_pki_port_vlan_fltr_config(int port,
+ pki_port_vlan_filter_config_t *fltr_cfg)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ pki_port_vlan_filter_config_t cfg = *fltr_cfg;
+ int len = sizeof(pki_port_vlan_filter_config_t);
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_VLAN_FILTER_CONFIG;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, &cfg, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+ return res;
+}
+
+int
+octeontx_pki_port_vlan_fltr_entry_config(int port,
+ pki_port_vlan_filter_entry_config_t *e_cfg)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ pki_port_vlan_filter_entry_config_t cfg = *e_cfg;
+ int len = sizeof(pki_port_vlan_filter_entry_config_t);
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_VLAN_FILTER_ENTRY_CONFIG;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, &cfg, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+ return res;
+}
+
#define PCI_VENDOR_ID_CAVIUM 0x177D
#define PCI_DEVICE_ID_OCTEONTX_PKI_VF 0xA0DD
#define MBOX_PKI_PORT_ALLOC_QPG 21
#define MBOX_PKI_PORT_FREE_QPG 22
#define MBOX_PKI_SET_PORT_CONFIG 23
+#define MBOX_PKI_PORT_VLAN_FILTER_CONFIG 24
+#define MBOX_PKI_PORT_VLAN_FILTER_ENTRY_CONFIG 25
#define MBOX_PKI_MAX_QOS_ENTRY 64
struct pki_qos_entry qos_entry;
} pki_mod_qos_t;
+/* pki port VLAN filter config */
+typedef struct pki_port_vlan_filter_config {
+ uint8_t port_type; /* OCTTX_PORT_TYPE_[NET/INT/PCI] */
+ uint8_t fltr_conf; /* '1' to enable & '0' to disable */
+} pki_port_vlan_filter_config_t;
+
+/* pki port VLAN filter entry config */
+typedef struct pki_port_vlan_filter_entry_config {
+ uint8_t port_type; /* OCTTX_PORT_TYPE_[NET/INT/PCI] */
+ uint8_t entry_conf; /* '1' to add & '0' to remove */
+ uint16_t vlan_tpid; /* in host byte-order */
+ uint16_t vlan_id; /* in host byte-order */
+} pki_port_vlan_filter_entry_config_t;
+
static inline int
octeontx_pki_port_modify_qos(int port, pki_mod_qos_t *qos_cfg)
{
int octeontx_pki_port_create_qos(int port, pki_qos_cfg_t *qos_cfg);
int octeontx_pki_port_close(int port);
int octeontx_pki_port_errchk_config(int port, pki_errchk_cfg_t *cfg);
+int octeontx_pki_port_vlan_fltr_config(int port,
+ pki_port_vlan_filter_config_t *fltr_cfg);
+int octeontx_pki_port_vlan_fltr_entry_config(int port,
+ pki_port_vlan_filter_entry_config_t *entry_cfg);
#endif /* __OCTEONTX_PKI_H__ */
objs = [base_objs]
sources = files('octeontx_rxtx.c',
- 'octeontx_ethdev.c'
+ 'octeontx_ethdev.c',
+ 'octeontx_ethdev_ops.c'
)
deps += ['mempool_octeontx', 'eventdev']
return -EFAULT;
}
+ ret = octeontx_dev_vlan_offload_init(dev);
+ if (ret) {
+ octeontx_log_err("failed to initialize vlan offload");
+ return -EFAULT;
+ }
+
nic->pki.classifier_enable = false;
nic->pki.hash_enable = true;
nic->pki.initialized = false;
rte_event_dev_close(nic->evdev);
+ octeontx_dev_vlan_offload_fini(dev);
+
ret = octeontx_pko_channel_close(nic->base_ochan);
if (ret < 0) {
octeontx_log_err("failed to close channel %d VF%d %d %d",
.mac_addr_remove = octeontx_dev_mac_addr_del,
.mac_addr_add = octeontx_dev_mac_addr_add,
.mac_addr_set = octeontx_dev_default_mac_addr_set,
+ .vlan_offload_set = octeontx_dev_vlan_offload_set,
+ .vlan_filter_set = octeontx_dev_vlan_filter_set,
.tx_queue_start = octeontx_dev_tx_queue_start,
.tx_queue_stop = octeontx_dev_tx_queue_stop,
.tx_queue_setup = octeontx_dev_tx_queue_setup,
#define OCTEONTX_RX_OFFLOADS (DEV_RX_OFFLOAD_CHECKSUM | \
DEV_RX_OFFLOAD_SCATTER | \
- DEV_RX_OFFLOAD_JUMBO_FRAME)
+ DEV_RX_OFFLOAD_JUMBO_FRAME | \
+ DEV_RX_OFFLOAD_VLAN_FILTER)
#define OCTEONTX_TX_OFFLOADS (DEV_TX_OFFLOAD_MT_LOCKFREE | \
DEV_TX_OFFLOAD_MBUF_FAST_FREE | \
extern uint16_t
rte_octeontx_pchan_map[OCTEONTX_MAX_BGX_PORTS][OCTEONTX_MAX_LMAC_PER_BGX];
+struct vlan_entry {
+ TAILQ_ENTRY(vlan_entry) next;
+ uint16_t vlan_id;
+};
+
+TAILQ_HEAD(octeontx_vlan_filter_tbl, vlan_entry);
+
+struct octeontx_vlan_info {
+ struct octeontx_vlan_filter_tbl fltr_tbl;
+ uint8_t filter_on;
+};
+
/* Octeontx ethdev nic */
struct octeontx_nic {
struct rte_eth_dev *dev;
uint16_t rx_offload_flags;
uint64_t tx_offloads;
uint16_t tx_offload_flags;
+ struct octeontx_vlan_info vlan_info;
} __rte_cache_aligned;
struct octeontx_txq {
void
octeontx_set_tx_function(struct rte_eth_dev *dev);
+
+/* VLAN */
+int octeontx_dev_vlan_offload_init(struct rte_eth_dev *dev);
+int octeontx_dev_vlan_offload_fini(struct rte_eth_dev *eth_dev);
+int octeontx_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+int octeontx_dev_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vlan_id, int on);
+
#endif /* __OCTEONTX_ETHDEV_H__ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#include <rte_malloc.h>
+
+#include "octeontx_ethdev.h"
+#include "octeontx_logs.h"
+#include "octeontx_rxtx.h"
+
+static int
+octeontx_vlan_hw_filter(struct octeontx_nic *nic, uint8_t flag)
+{
+ struct octeontx_vlan_info *vlan = &nic->vlan_info;
+ pki_port_vlan_filter_config_t fltr_conf;
+ int rc = 0;
+
+ if (vlan->filter_on == flag)
+ return rc;
+
+ fltr_conf.port_type = OCTTX_PORT_TYPE_NET;
+ fltr_conf.fltr_conf = flag;
+
+ rc = octeontx_pki_port_vlan_fltr_config(nic->port_id, &fltr_conf);
+ if (rc != 0) {
+ octeontx_log_err("Fail to configure vlan hw filter for port %d",
+ nic->port_id);
+ goto done;
+ }
+
+ vlan->filter_on = flag;
+
+done:
+ return rc;
+}
+
+int
+octeontx_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ struct rte_eth_rxmode *rxmode;
+ int rc = 0;
+
+ rxmode = &dev->data->dev_conf.rxmode;
+
+ if (mask & ETH_VLAN_EXTEND_MASK) {
+ octeontx_log_err("Extend offload not supported");
+ return -ENOTSUP;
+ }
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ octeontx_log_err("VLAN strip offload not supported");
+ return -ENOTSUP;
+ }
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+ rc = octeontx_vlan_hw_filter(nic, true);
+ if (rc)
+ goto done;
+
+ nic->rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+ nic->rx_offload_flags |= OCCTX_RX_VLAN_FLTR_F;
+ } else {
+ rc = octeontx_vlan_hw_filter(nic, false);
+ if (rc)
+ goto done;
+
+ nic->rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
+ nic->rx_offload_flags &= ~OCCTX_RX_VLAN_FLTR_F;
+ }
+ }
+
+done:
+ return rc;
+}
+
+int
+octeontx_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ struct octeontx_vlan_info *vlan = &nic->vlan_info;
+ pki_port_vlan_filter_entry_config_t fltr_entry;
+ struct vlan_entry *entry = NULL;
+ int entry_count = 0;
+ int rc = -EINVAL;
+
+ if (on) {
+ TAILQ_FOREACH(entry, &vlan->fltr_tbl, next)
+ if (entry->vlan_id == vlan_id) {
+ octeontx_log_dbg("Vlan Id is already set");
+ return 0;
+ }
+ } else {
+ TAILQ_FOREACH(entry, &vlan->fltr_tbl, next)
+ entry_count++;
+
+ if (!entry_count)
+ return 0;
+ }
+
+ fltr_entry.port_type = OCTTX_PORT_TYPE_NET;
+ fltr_entry.vlan_tpid = RTE_ETHER_TYPE_VLAN;
+ fltr_entry.vlan_id = vlan_id;
+ fltr_entry.entry_conf = on;
+
+ if (on) {
+ entry = rte_zmalloc("octeontx_nic_vlan_entry",
+ sizeof(struct vlan_entry), 0);
+ if (!entry) {
+ octeontx_log_err("Failed to allocate memory");
+ return -ENOMEM;
+ }
+ }
+
+ rc = octeontx_pki_port_vlan_fltr_entry_config(nic->port_id,
+ &fltr_entry);
+ if (rc != 0) {
+ octeontx_log_err("Fail to configure vlan filter entry "
+ "for port %d", nic->port_id);
+ if (entry)
+ rte_free(entry);
+
+ goto done;
+ }
+
+ if (on) {
+ entry->vlan_id = vlan_id;
+ TAILQ_INSERT_HEAD(&vlan->fltr_tbl, entry, next);
+ } else {
+ TAILQ_FOREACH(entry, &vlan->fltr_tbl, next) {
+ if (entry->vlan_id == vlan_id) {
+ TAILQ_REMOVE(&vlan->fltr_tbl, entry, next);
+ rte_free(entry);
+ break;
+ }
+ }
+ }
+
+done:
+ return rc;
+}
+
+int
+octeontx_dev_vlan_offload_init(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ int rc;
+
+ TAILQ_INIT(&nic->vlan_info.fltr_tbl);
+
+ rc = octeontx_dev_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+ if (rc)
+ octeontx_log_err("Failed to set vlan offload rc=%d", rc);
+
+ return rc;
+}
+
+int
+octeontx_dev_vlan_offload_fini(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ struct octeontx_vlan_info *vlan = &nic->vlan_info;
+ pki_port_vlan_filter_entry_config_t fltr_entry;
+ struct vlan_entry *entry;
+ int rc = 0;
+
+ TAILQ_FOREACH(entry, &vlan->fltr_tbl, next) {
+ fltr_entry.port_type = OCTTX_PORT_TYPE_NET;
+ fltr_entry.vlan_tpid = RTE_ETHER_TYPE_VLAN;
+ fltr_entry.vlan_id = entry->vlan_id;
+ fltr_entry.entry_conf = 0;
+
+ rc = octeontx_pki_port_vlan_fltr_entry_config(nic->port_id,
+ &fltr_entry);
+ if (rc != 0) {
+ octeontx_log_err("Fail to configure vlan filter entry "
+ "for port %d", nic->port_id);
+ break;
+ }
+ }
+
+ return rc;
+}
#define OCCTX_RX_OFFLOAD_NONE (0)
#define OCCTX_RX_OFFLOAD_RSS_F BIT(0)
+#define OCCTX_RX_VLAN_FLTR_F BIT(1)
#define OCCTX_RX_MULTI_SEG_F BIT(15)
#define OCCTX_TX_OFFLOAD_NONE (0)