static int ngbe_dev_close(struct rte_eth_dev *dev);
static int ngbe_dev_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
+static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
+static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
+ uint16_t queue);
static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static void ngbe_dev_interrupt_delayed_handler(void *param);
static void ngbe_configure_msix(struct rte_eth_dev *dev);
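+/* One bit per Rx queue tracks whether HW VLAN stripping is enabled */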
+#define NGBE_SET_HWSTRIP(h, q) do { \
+ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+ uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
+ (h)->bitmap[idx] |= 1 << bit; \
+ } while (0)
+
+#define NGBE_CLEAR_HWSTRIP(h, q) do { \
+ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+ uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
+ (h)->bitmap[idx] &= ~(1 << bit); \
+ } while (0)
+
+#define NGBE_GET_HWSTRIP(h, q, r) do { \
+ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+ uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
+ (r) = ((h)->bitmap[idx] >> bit) & 1; \
+ } while (0)
+
/*
* The set of PCI devices this driver supports
*/
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
+ struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
+ struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
const struct rte_memzone *mz;
uint32_t ctrl_ext;
return -ENOMEM;
}
+ /* initialize the vfta */
+ memset(shadow_vfta, 0, sizeof(*shadow_vfta));
+
+ /* initialize the HW strip bitmap */
+ memset(hwstrip, 0, sizeof(*hwstrip));
+
ctrl_ext = rd32(hw, NGBE_PORTCTL);
/* let hardware know driver is loaded */
ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
.remove = eth_ngbe_pci_remove,
};
+static int
+ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
+ uint32_t vfta;
+ uint32_t vid_idx;
+ uint32_t vid_bit;
+
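+ /*
+ * The 4096-entry VFTA is an array of 128 32-bit registers:
+ * select the register index and bit for this VLAN ID.
+ */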
+ vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
+ vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
+ vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
+ if (on)
+ vfta |= vid_bit;
+ else
+ vfta &= ~vid_bit;
+ wr32(hw, NGBE_VLANTBL(vid_idx), vfta);
+
+ /* update local VFTA copy */
+ shadow_vfta->vfta[vid_idx] = vfta;
+
+ return 0;
+}
+
+static void
+ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ struct ngbe_rx_queue *rxq;
+ bool restart;
+ uint32_t rxcfg, rxbal, rxbah;
+
+ if (on)
+ ngbe_vlan_hw_strip_enable(dev, queue);
+ else
+ ngbe_vlan_hw_strip_disable(dev, queue);
+
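+ /*
+ * Save the ring registers; if the ring is running and the strip
+ * setting changes, the ring must be stopped and restored with
+ * the new configuration.
+ */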
+ rxq = dev->data->rx_queues[queue];
+ rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx));
+ rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx));
+ rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
+ restart = (rxcfg & NGBE_RXCFG_ENA) &&
+ !(rxcfg & NGBE_RXCFG_VLAN);
+ rxcfg |= NGBE_RXCFG_VLAN;
+ } else {
+ restart = (rxcfg & NGBE_RXCFG_ENA) &&
+ (rxcfg & NGBE_RXCFG_VLAN);
+ rxcfg &= ~NGBE_RXCFG_VLAN;
+ }
+ rxcfg &= ~NGBE_RXCFG_ENA;
+
+ if (restart) {
+ /* set vlan strip for ring */
+ ngbe_dev_rx_queue_stop(dev, queue);
+ wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal);
+ wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah);
+ wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg);
+ ngbe_dev_rx_queue_start(dev, queue);
+ }
+}
+
+static int
+ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
+ enum rte_vlan_type vlan_type,
+ uint16_t tpid)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ int ret = 0;
+ uint32_t portctrl, vlan_ext, qinq;
+
+ portctrl = rd32(hw, NGBE_PORTCTL);
+
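+ /* QinQ is only considered active when extended VLAN is enabled too */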
+ vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
+ qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
+ switch (vlan_type) {
+ case RTE_ETH_VLAN_TYPE_INNER:
+ if (vlan_ext) {
+ wr32m(hw, NGBE_VLANCTL,
+ NGBE_VLANCTL_TPID_MASK,
+ NGBE_VLANCTL_TPID(tpid));
+ wr32m(hw, NGBE_DMATXCTRL,
+ NGBE_DMATXCTRL_TPID_MASK,
+ NGBE_DMATXCTRL_TPID(tpid));
+ } else {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR,
+ "Inner type is not supported by single VLAN");
+ }
+
+ if (qinq) {
+ wr32m(hw, NGBE_TAGTPID(0),
+ NGBE_TAGTPID_LSB_MASK,
+ NGBE_TAGTPID_LSB(tpid));
+ }
+ break;
+ case RTE_ETH_VLAN_TYPE_OUTER:
+ if (vlan_ext) {
+ /* Only the high 16 bits are valid */
+ wr32m(hw, NGBE_EXTAG,
+ NGBE_EXTAG_VLAN_MASK,
+ NGBE_EXTAG_VLAN(tpid));
+ } else {
+ wr32m(hw, NGBE_VLANCTL,
+ NGBE_VLANCTL_TPID_MASK,
+ NGBE_VLANCTL_TPID(tpid));
+ wr32m(hw, NGBE_DMATXCTRL,
+ NGBE_DMATXCTRL_TPID_MASK,
+ NGBE_DMATXCTRL_TPID(tpid));
+ }
+
+ if (qinq) {
+ wr32m(hw, NGBE_TAGTPID(0),
+ NGBE_TAGTPID_MSB_MASK,
+ NGBE_TAGTPID_MSB(tpid));
+ }
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+void
+ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ uint32_t vlnctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Filter Table Disable */
+ vlnctrl = rd32(hw, NGBE_VLANCTL);
+ vlnctrl &= ~NGBE_VLANCTL_VFE;
+ wr32(hw, NGBE_VLANCTL, vlnctrl);
+}
+
+void
+ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
+ uint32_t vlnctrl;
+ uint16_t i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Filter Table Enable */
+ vlnctrl = rd32(hw, NGBE_VLANCTL);
+ vlnctrl &= ~NGBE_VLANCTL_CFIENA;
+ vlnctrl |= NGBE_VLANCTL_VFE;
+ wr32(hw, NGBE_VLANCTL, vlnctrl);
+
+ /* write whatever is in the local VFTA copy */
+ for (i = 0; i < NGBE_VFTA_SIZE; i++)
+ wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
+}
+
+void
+ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
+{
+ struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
+ struct ngbe_rx_queue *rxq;
+
+ if (queue >= NGBE_MAX_RX_QUEUE_NUM)
+ return;
+
+ if (on)
+ NGBE_SET_HWSTRIP(hwstrip, queue);
+ else
+ NGBE_CLEAR_HWSTRIP(hwstrip, queue);
+
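+ /* only touch the queue structure for queues that have been set up */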
+ if (queue >= dev->data->nb_rx_queues)
+ return;
+
+ rxq = dev->data->rx_queues[queue];
+
+ if (on) {
+ rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
+ rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+ } else {
+ rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
+ rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+ }
+}
+
+static void
+ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ uint32_t ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ctrl = rd32(hw, NGBE_RXCFG(queue));
+ ctrl &= ~NGBE_RXCFG_VLAN;
+ wr32(hw, NGBE_RXCFG(queue), ctrl);
+
+ /* record this setting in the per-queue HW strip bitmap */
+ ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
+}
+
+static void
+ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ uint32_t ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ctrl = rd32(hw, NGBE_RXCFG(queue));
+ ctrl |= NGBE_RXCFG_VLAN;
+ wr32(hw, NGBE_RXCFG(queue), ctrl);
+
+ /* record this setting in the per-queue HW strip bitmap */
+ ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
+}
+
+static void
+ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ uint32_t ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ctrl = rd32(hw, NGBE_PORTCTL);
+ ctrl &= ~NGBE_PORTCTL_VLANEXT;
+ ctrl &= ~NGBE_PORTCTL_QINQ;
+ wr32(hw, NGBE_PORTCTL, ctrl);
+}
+
+static void
+ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ uint32_t ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ctrl = rd32(hw, NGBE_PORTCTL);
+ ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
+ wr32(hw, NGBE_PORTCTL, ctrl);
+}
+
+static void
+ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ uint32_t ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ctrl = rd32(hw, NGBE_PORTCTL);
+ ctrl &= ~NGBE_PORTCTL_QINQ;
+ wr32(hw, NGBE_PORTCTL, ctrl);
+}
+
+static void
+ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ uint32_t ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ctrl = rd32(hw, NGBE_PORTCTL);
+ ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
+ wr32(hw, NGBE_PORTCTL, ctrl);
+}
+
+void
+ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
+{
+ struct ngbe_rx_queue *rxq;
+ uint16_t i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+ ngbe_vlan_hw_strip_enable(dev, i);
+ else
+ ngbe_vlan_hw_strip_disable(dev, i);
+ }
+}
+
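+/* Keep the per-queue VLAN strip offload flags in sync with the
+ * port-level setting before the hardware strip config is applied.
+ */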
+void
+ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
+{
+ uint16_t i;
+ struct rte_eth_rxmode *rxmode;
+ struct ngbe_rx_queue *rxq;
+
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+ rxmode = &dev->data->dev_conf.rxmode;
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+ }
+ } else {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+ }
+ }
+ }
+}
+
+static int
+ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
+{
+ struct rte_eth_rxmode *rxmode;
+ rxmode = &dev->data->dev_conf.rxmode;
+
+ if (mask & RTE_ETH_VLAN_STRIP_MASK)
+ ngbe_vlan_hw_strip_config(dev);
+
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+ ngbe_vlan_hw_filter_enable(dev);
+ else
+ ngbe_vlan_hw_filter_disable(dev);
+ }
+
+ if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
+ ngbe_vlan_hw_extend_enable(dev);
+ else
+ ngbe_vlan_hw_extend_disable(dev);
+ }
+
+ if (mask & RTE_ETH_QINQ_STRIP_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
+ ngbe_qinq_hw_strip_enable(dev);
+ else
+ ngbe_qinq_hw_strip_disable(dev);
+ }
+
+ return 0;
+}
+
+static int
+ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ ngbe_config_vlan_strip_on_all_queues(dev, mask);
+
+ ngbe_vlan_offload_config(dev, mask);
+
+ return 0;
+}
+
static int
ngbe_dev_configure(struct rte_eth_dev *dev)
{
bool link_up = false, negotiate = false;
uint32_t speed = 0;
uint32_t allowed_speeds = 0;
+ int mask = 0;
int status;
uint32_t *link_speeds;
goto error;
}
+ mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK;
+ err = ngbe_vlan_offload_config(dev, mask);
+ if (err != 0) {
+ PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
+ goto error;
+ }
+
+ ngbe_configure_port(dev);
+
err = ngbe_dev_rxtx_start(dev);
if (err < 0) {
PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
dev_info->min_rx_bufsize = 1024;
dev_info->max_rx_pktlen = 15872;
+ dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
dev_info->rx_queue_offload_capa);
dev_info->tx_queue_offload_capa = 0;
.dev_reset = ngbe_dev_reset,
.link_update = ngbe_dev_link_update,
.dev_supported_ptypes_get = ngbe_dev_supported_ptypes_get,
+ .vlan_filter_set = ngbe_vlan_filter_set,
+ .vlan_tpid_set = ngbe_vlan_tpid_set,
+ .vlan_offload_set = ngbe_vlan_offload_set,
+ .vlan_strip_queue_set = ngbe_vlan_strip_queue_set,
.rx_queue_start = ngbe_dev_rx_queue_start,
.rx_queue_stop = ngbe_dev_rx_queue_stop,
.tx_queue_start = ngbe_dev_tx_queue_start,
RTE_MBUF_F_TX_OUTER_IPV4 |
RTE_MBUF_F_TX_IPV6 |
RTE_MBUF_F_TX_IPV4 |
+ RTE_MBUF_F_TX_VLAN |
RTE_MBUF_F_TX_L4_MASK |
RTE_MBUF_F_TX_TCP_SEG |
RTE_MBUF_F_TX_TUNNEL_MASK |
vlan_macip_lens |= NGBE_TXD_MACLEN(tx_offload.l2_len);
}
+ if (ol_flags & RTE_MBUF_F_TX_VLAN) {
+ tx_offload_mask.vlan_tci |= ~0;
+ vlan_macip_lens |= NGBE_TXD_VLAN(tx_offload.vlan_tci);
+ }
+
txq->ctx_cache[ctx_idx].flags = ol_flags;
txq->ctx_cache[ctx_idx].tx_offload.data[0] =
tx_offload_mask.data[0] & tx_offload.data[0];
tmp |= NGBE_TXD_IPCS;
tmp |= NGBE_TXD_L4CS;
}
+ if (ol_flags & RTE_MBUF_F_TX_VLAN)
+ tmp |= NGBE_TXD_CC;
return tmp;
}
{
uint32_t cmdtype = 0;
+ if (ol_flags & RTE_MBUF_F_TX_VLAN)
+ cmdtype |= NGBE_TXD_VLE;
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
cmdtype |= NGBE_TXD_TSE;
return cmdtype;
/* L2 level */
ptype = RTE_PTYPE_L2_ETHER;
+ if (oflags & RTE_MBUF_F_TX_VLAN)
+ ptype |= RTE_PTYPE_L2_ETHER_VLAN;
/* L3 level */
if (oflags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IP_CKSUM))
tx_offload.l2_len = tx_pkt->l2_len;
tx_offload.l3_len = tx_pkt->l3_len;
tx_offload.l4_len = tx_pkt->l4_len;
+ tx_offload.vlan_tci = tx_pkt->vlan_tci;
tx_offload.tso_segsz = tx_pkt->tso_segsz;
tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
return ngbe_decode_ptype(ptid);
}
+static inline uint64_t
+rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
+{
+ uint64_t pkt_flags;
+
+ /*
+ * Check only whether a VLAN is present.
+ * Do not check whether the L3/L4 Rx checksum was done by the NIC;
+ * that can be found in the rte_eth_rxmode.offloads flags.
+ */
+ pkt_flags = (rx_status & NGBE_RXD_STAT_VLAN &&
+ vlan_flags & RTE_MBUF_F_RX_VLAN_STRIPPED)
+ ? vlan_flags : 0;
+
+ return pkt_flags;
+}
+
static inline uint64_t
rx_desc_error_to_pkt_flags(uint32_t rx_status)
{
rxq->crc_len;
mb->data_len = pkt_len;
mb->pkt_len = pkt_len;
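+ /* Only valid if RTE_MBUF_F_RX_VLAN is set in pkt_flags */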
+ mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].qw1.hi.tag);
/* convert descriptor fields to rte mbuf flags */
- pkt_flags = rx_desc_error_to_pkt_flags(s[j]);
+ pkt_flags = rx_desc_status_to_pkt_flags(s[j],
+ rxq->vlan_flags);
+ pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
mb->ol_flags = pkt_flags;
mb->packet_type =
ngbe_rxd_pkt_info_to_pkt_type(pkt_info[j],
* - Rx port identifier.
* 2) integrate hardware offload data, if any:
* - IP checksum flag,
+ * - VLAN TCI, if any,
* - error flags.
*/
pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.qw1.hi.len) -
rxm->port = rxq->port_id;
pkt_info = rte_le_to_cpu_32(rxd.qw0.dw0);
- pkt_flags = rx_desc_error_to_pkt_flags(staterr);
+ /* Only valid if RTE_MBUF_F_RX_VLAN is set in pkt_flags */
+ rxm->vlan_tci = rte_le_to_cpu_16(rxd.qw1.hi.tag);
+
+ pkt_flags = rx_desc_status_to_pkt_flags(staterr,
+ rxq->vlan_flags);
+ pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
rxm->ol_flags = pkt_flags;
rxm->packet_type = ngbe_rxd_pkt_info_to_pkt_type(pkt_info,
NGBE_PTID_MASK);
* - RX port identifier
* - hardware offload data, if any:
* - IP checksum flag
+ * - VLAN TCI, if any
* - error flags
* @head HEAD of the packet cluster
* @desc HW descriptor to get data from
head->port = rxq->port_id;
+ /* The vlan_tci field is only valid when RTE_MBUF_F_RX_VLAN is
+ * set in the pkt_flags field.
+ */
+ head->vlan_tci = rte_le_to_cpu_16(desc->qw1.hi.tag);
pkt_info = rte_le_to_cpu_32(desc->qw0.dw0);
- pkt_flags = rx_desc_error_to_pkt_flags(staterr);
+ pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
+ pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
head->ol_flags = pkt_flags;
head->packet_type = ngbe_rxd_pkt_info_to_pkt_type(pkt_info,
NGBE_PTID_MASK);
ngbe_get_tx_port_offloads(struct rte_eth_dev *dev)
{
uint64_t tx_offload_capa;
-
- RTE_SET_USED(dev);
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
tx_offload_capa =
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
+ if (hw->is_pf)
+ tx_offload_capa |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT;
+
return tx_offload_capa;
}
}
uint64_t
-ngbe_get_rx_port_offloads(struct rte_eth_dev *dev __rte_unused)
+ngbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)
+{
+ return RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+}
+
+uint64_t
+ngbe_get_rx_port_offloads(struct rte_eth_dev *dev)
{
uint64_t offloads;
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
RTE_ETH_RX_OFFLOAD_SCATTER;
+ if (hw->is_pf)
+ offloads |= (RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
+
return offloads;
}
struct ngbe_hw *hw;
uint16_t len;
struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
hw = ngbe_dev_hw(dev);
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
/* Free memory prior to re-allocation if needed... */
if (dev->data->rx_queues[queue_idx] != NULL) {
ngbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
rxq->crc_len = 0;
rxq->drop_en = rx_conf->rx_drop_en;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+ rxq->offloads = offloads;
/*
* Allocate Rx ring hardware descriptors. A memzone large enough to
dev->data->nb_tx_queues = 0;
}
+void ngbe_configure_port(struct rte_eth_dev *dev)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ int i = 0;
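+ /* EtherTypes treated as VLAN tags; 0x9100/0x9200 are legacy QinQ TPIDs */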
+ uint16_t tpids[8] = {RTE_ETHER_TYPE_VLAN, RTE_ETHER_TYPE_QINQ,
+ 0x9100, 0x9200,
+ 0x0000, 0x0000,
+ 0x0000, 0x0000};
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* default outer vlan tpid */
+ wr32(hw, NGBE_EXTAG,
+ NGBE_EXTAG_ETAG(RTE_ETHER_TYPE_ETAG) |
+ NGBE_EXTAG_VLAN(RTE_ETHER_TYPE_QINQ));
+
+ /* default inner vlan tpid */
+ wr32m(hw, NGBE_VLANCTL,
+ NGBE_VLANCTL_TPID_MASK,
+ NGBE_VLANCTL_TPID(RTE_ETHER_TYPE_VLAN));
+ wr32m(hw, NGBE_DMATXCTRL,
+ NGBE_DMATXCTRL_TPID_MASK,
+ NGBE_DMATXCTRL_TPID(RTE_ETHER_TYPE_VLAN));
+
+ /* default vlan tpid filters */
+ for (i = 0; i < 8; i++) {
+ wr32m(hw, NGBE_TAGTPID(i / 2),
+ (i % 2 ? NGBE_TAGTPID_MSB_MASK
+ : NGBE_TAGTPID_LSB_MASK),
+ (i % 2 ? NGBE_TAGTPID_MSB(tpids[i])
+ : NGBE_TAGTPID_LSB(tpids[i])));
+ }
+}
+
static int
ngbe_alloc_rx_queue_mbufs(struct ngbe_rx_queue *rxq)
{
wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
NGBE_FRMSZ_MAX(dev->data->mtu + NGBE_ETH_OVERHEAD));
+ /*
+ * Assume no header split and no VLAN strip support
+ * on any Rx queue first.
+ */
+ rx_conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+
/* Setup Rx queues */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
srrctl |= NGBE_RXCFG_PKTLEN(buf_size);
wr32(hw, NGBE_RXCFG(rxq->reg_idx), srrctl);
+
+ /* account for two VLAN tags (dual VLAN) when checking the buffer size */
+ if (dev->data->mtu + NGBE_ETH_OVERHEAD +
+ 2 * NGBE_VLAN_TAG_SIZE > buf_size)
+ dev->data->scattered_rx = 1;
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+ rx_conf->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
}
if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)