*/
#include <rte_config.h>
+#include <rte_flow.h>
#include <rte_malloc.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_net.h>
#include "igc_logs.h"
rte_free(rxq);
}
-void eth_igc_rx_queue_release(void *rxq)
+/*
+ * ethdev .rx_queue_release callback, updated to the (dev, qid) signature:
+ * free the Rx queue at index qid if one was allocated.
+ */
+void eth_igc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- if (rxq)
- igc_rx_queue_release(rxq);
+ if (dev->data->rx_queues[qid])
+ igc_rx_queue_release(dev->data->rx_queues[qid])
}
-uint32_t eth_igc_rx_queue_count(struct rte_eth_dev *dev,
- uint16_t rx_queue_id)
+uint32_t eth_igc_rx_queue_count(void *rx_queue)
{
/**
* Check the DD bit of a rx descriptor of each 4 in a group,
struct igc_rx_queue *rxq;
uint16_t desc = 0;
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = rx_queue;
rxdp = &rxq->rx_ring[rxq->rx_tail];
while (desc < rxq->nb_rx_desc - rxq->rx_tail) {
return desc;
}
-int eth_igc_rx_descriptor_done(void *rx_queue, uint16_t offset)
-{
- volatile union igc_adv_rx_desc *rxdp;
- struct igc_rx_queue *rxq = rx_queue;
- uint32_t desc;
-
- if (unlikely(!rxq || offset >= rxq->nb_rx_desc))
- return 0;
-
- desc = rxq->rx_tail + offset;
- if (desc >= rxq->nb_rx_desc)
- desc -= rxq->nb_rx_desc;
-
- rxdp = &rxq->rx_ring[desc];
- return !!(rxdp->wb.upper.status_error &
- rte_cpu_to_le_32(IGC_RXD_STAT_DD));
-}
-
int eth_igc_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
struct igc_rx_queue *rxq = rx_queue;
0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
};
-static void
+void
igc_rss_disable(struct rte_eth_dev *dev)
{
struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
igc_hw_rss_hash_set(hw, &rss_conf);
}
+/*
+ * Remove the (single) RSS flow filter and restore the device's default
+ * RSS configuration.
+ *
+ * @dev: ethdev whose private RSS filter state is examined and cleared.
+ *
+ * Return: 0 on success, -ENOENT if no RSS filter is currently installed.
+ */
+int
+igc_del_rss_filter(struct rte_eth_dev *dev)
+{
+ struct igc_rss_filter *rss_filter = IGC_DEV_PRIVATE_RSS_FILTER(dev);
+
+ if (rss_filter->enable) {
+ /* recover default RSS configuration */
+ igc_rss_configure(dev);
+
+ /* disable RSS logic and clear filter data */
+ igc_rss_disable(dev);
+ memset(rss_filter, 0, sizeof(*rss_filter));
+ return 0;
+ }
+ PMD_DRV_LOG(ERR, "filter not exist!");
+ return -ENOENT;
+}
+
+/* Initiate the filter structure by the structure of rte_flow_action_rss */
+/*
+ * Deep-copies the key and queue list into @out so the stored filter does
+ * not reference caller-owned memory.  A key is kept only when its length
+ * exactly matches sizeof(out->key); otherwise the key is dropped (NULL/0).
+ * Likewise a queue list longer than IGC_RSS_RDT_SIZD is dropped.
+ */
+void
+igc_rss_conf_set(struct igc_rss_filter *out,
+ const struct rte_flow_action_rss *rss)
+{
+ out->conf.func = rss->func;
+ out->conf.level = rss->level;
+ out->conf.types = rss->types;
+
+ /* only an exact-size key can be stored in the fixed-size buffer */
+ if (rss->key_len == sizeof(out->key)) {
+ memcpy(out->key, rss->key, rss->key_len);
+ out->conf.key = out->key;
+ out->conf.key_len = rss->key_len;
+ } else {
+ out->conf.key = NULL;
+ out->conf.key_len = 0;
+ }
+
+ /* queue list must fit the filter's fixed-size queue array */
+ if (rss->queue_num <= IGC_RSS_RDT_SIZD) {
+ memcpy(out->queue, rss->queue,
+ sizeof(*out->queue) * rss->queue_num);
+ out->conf.queue = out->queue;
+ out->conf.queue_num = rss->queue_num;
+ } else {
+ out->conf.queue = NULL;
+ out->conf.queue_num = 0;
+ }
+}
+
+/*
+ * Install an RSS flow filter: validate the requested hash types and
+ * queues, record the filter, program the redirection table (RETA) and
+ * the hash key/types into hardware.
+ *
+ * Only one RSS filter is supported at a time.
+ *
+ * Return: 0 on success, -EINVAL on bad hash type/queue parameters,
+ * -ENOTSUP if an RSS filter is already installed.
+ */
+int
+igc_add_rss_filter(struct rte_eth_dev *dev, struct igc_rss_filter *rss)
+{
+ struct rte_eth_rss_conf rss_conf = {
+ .rss_key = rss->conf.key_len ?
+ (void *)(uintptr_t)rss->conf.key : NULL,
+ .rss_key_len = rss->conf.key_len,
+ .rss_hf = rss->conf.types,
+ };
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ struct igc_rss_filter *rss_filter = IGC_DEV_PRIVATE_RSS_FILTER(dev);
+ uint32_t i, j;
+
+ /* check RSS type is valid */
+ if ((rss_conf.rss_hf & IGC_RSS_OFFLOAD_ALL) == 0) {
+ PMD_DRV_LOG(ERR,
+ "RSS type(0x%" PRIx64 ") error!, only 0x%" PRIx64
+ " been supported", rss_conf.rss_hf,
+ (uint64_t)IGC_RSS_OFFLOAD_ALL);
+ return -EINVAL;
+ }
+
+ /* check queue count is not zero */
+ if (!rss->conf.queue_num) {
+ PMD_DRV_LOG(ERR, "Queue number should not be 0!");
+ return -EINVAL;
+ }
+
+ /* check queue id is valid */
+ for (i = 0; i < rss->conf.queue_num; i++)
+ if (rss->conf.queue[i] >= dev->data->nb_rx_queues) {
+ PMD_DRV_LOG(ERR, "Queue id %u is invalid!",
+ rss->conf.queue[i]);
+ return -EINVAL;
+ }
+
+ /* only support one filter */
+ if (rss_filter->enable) {
+ PMD_DRV_LOG(ERR, "Only support one RSS filter!");
+ return -ENOTSUP;
+ }
+ rss_filter->enable = 1;
+
+ /* deep-copy the request into the device-private filter state */
+ igc_rss_conf_set(rss_filter, &rss->conf);
+
+ /* Fill in redirection table. */
+ /*
+ * Queues are assigned round-robin (index j cycles over conf.queue).
+ * Each RETA register packs sizeof(reta) one-byte queue entries; the
+ * register is written once its last byte has been filled.
+ */
+ for (i = 0, j = 0; i < IGC_RSS_RDT_SIZD; i++, j++) {
+ union igc_rss_reta_reg reta;
+ uint16_t q_idx, reta_idx;
+
+ if (j == rss->conf.queue_num)
+ j = 0;
+ q_idx = rss->conf.queue[j];
+ reta_idx = i % sizeof(reta);
+ reta.bytes[reta_idx] = q_idx;
+ if (reta_idx == sizeof(reta) - 1)
+ IGC_WRITE_REG_LE_VALUE(hw,
+ IGC_RETA(i / sizeof(reta)), reta.dword);
+ }
+
+ /* no user key stored (see igc_rss_conf_set) -> fall back to default */
+ if (rss_conf.rss_key == NULL)
+ rss_conf.rss_key = default_rss_key;
+ igc_hw_rss_hash_set(hw, &rss_conf);
+ return 0;
+}
+
+/*
+ * Silent variant of igc_del_rss_filter(): tear down the RSS filter if one
+ * is installed, without logging an error when none exists (used on device
+ * cleanup paths where "no filter" is not an error).
+ */
+void
+igc_clear_rss_filter(struct rte_eth_dev *dev)
+{
+ struct igc_rss_filter *rss_filter = IGC_DEV_PRIVATE_RSS_FILTER(dev);
+
+ if (!rss_filter->enable)
+ return;
+
+ /* recover default RSS configuration */
+ igc_rss_configure(dev);
+
+ /* disable RSS logic and clear filter data */
+ igc_rss_disable(dev);
+ memset(rss_filter, 0, sizeof(*rss_filter));
+}
+
static int
igc_dev_mq_rx_configure(struct rte_eth_dev *dev)
{
struct igc_rx_queue *rxq;
struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
uint64_t offloads = dev->data->dev_conf.rxmode.offloads;
- uint32_t max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ uint32_t max_rx_pktlen;
uint32_t rctl;
uint32_t rxcsum;
uint16_t buf_size;
IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN);
/* Configure support of jumbo frames, if any. */
- if (offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ if (dev->data->mtu > RTE_ETHER_MTU)
rctl |= IGC_RCTL_LPE;
-
- /*
- * Set maximum packet length by default, and might be updated
- * together with enabling/disabling dual VLAN.
- */
- IGC_WRITE_REG(hw, IGC_RLPML, max_rx_pkt_len);
- } else {
+ else
rctl &= ~IGC_RCTL_LPE;
- }
+
+ max_rx_pktlen = dev->data->mtu + IGC_ETH_OVERHEAD;
+ /*
+ * Set maximum packet length by default, and might be updated
+ * together with enabling/disabling dual VLAN.
+ */
+ IGC_WRITE_REG(hw, IGC_RLPML, max_rx_pktlen);
/* Configure and enable each RX queue. */
rctl_bsize = 0;
IGC_SRRCTL_BSIZEPKT_SHIFT);
/* It adds dual VLAN length for supporting dual VLAN */
- if (max_rx_pkt_len + 2 * VLAN_TAG_SIZE > buf_size)
+ if (max_rx_pktlen > buf_size)
dev->data->scattered_rx = 1;
} else {
/*
IGC_RCTL_DPF |
(hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);
+ if (dev->data->dev_conf.lpbk_mode == 1)
+ rctl |= IGC_RCTL_LBM_MAC;
+
rctl &= ~(IGC_RCTL_HSEL_MSK | IGC_RCTL_CFIEN | IGC_RCTL_CFI |
IGC_RCTL_PSP | IGC_RCTL_PMCF);
* This needs to be done after enable.
*/
for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ uint32_t dvmolr;
+
rxq = dev->data->rx_queues[i];
IGC_WRITE_REG(hw, IGC_RDH(rxq->reg_idx), 0);
- IGC_WRITE_REG(hw, IGC_RDT(rxq->reg_idx),
- rxq->nb_rx_desc - 1);
+ IGC_WRITE_REG(hw, IGC_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
+
+ dvmolr = IGC_READ_REG(hw, IGC_DVMOLR(rxq->reg_idx));
+ if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ dvmolr |= IGC_DVMOLR_STRVLAN;
+ else
+ dvmolr &= ~IGC_DVMOLR_STRVLAN;
+
+ if (offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ dvmolr &= ~IGC_DVMOLR_STRCRC;
+ else
+ dvmolr |= IGC_DVMOLR_STRCRC;
+
+ IGC_WRITE_REG(hw, IGC_DVMOLR(rxq->reg_idx), dvmolr);
}
return 0;
return i;
}
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+#ifdef RTE_ETHDEV_DEBUG_TX
ret = rte_validate_tx_offload(m);
if (ret != 0) {
rte_errno = -ret;
rte_free(txq);
}
-void eth_igc_tx_queue_release(void *txq)
+/*
+ * ethdev .tx_queue_release callback, updated to the (dev, qid) signature:
+ * free the Tx queue at index qid if one was allocated.
+ */
+void eth_igc_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- if (txq)
- igc_tx_queue_release(txq);
+ if (dev->data->tx_queues[qid])
+ igc_tx_queue_release(dev->data->tx_queues[qid]);
}
static void
qinfo->conf.tx_thresh.wthresh = txq->wthresh;
qinfo->conf.offloads = txq->offloads;
}
+
+/*
+ * ethdev .vlan_strip_queue_set callback: enable or disable hardware VLAN
+ * stripping for a single Rx queue via its DVMOLR register, and keep the
+ * queue's offload flags in sync.
+ *
+ * @dev: ethdev handle.
+ * @rx_queue_id: Rx queue index; must be < IGC_QUEUE_PAIRS_NUM.
+ * @on: non-zero to strip VLAN tags, zero to keep them in the packet.
+ */
+void
+eth_igc_vlan_strip_queue_set(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id, int on)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ struct igc_rx_queue *rxq;
+ uint32_t reg_val;
+
+ /* validate the index BEFORE indexing the queue array */
+ if (rx_queue_id >= IGC_QUEUE_PAIRS_NUM) {
+ PMD_DRV_LOG(ERR, "Queue index(%u) illegal, max is %u",
+ rx_queue_id, IGC_QUEUE_PAIRS_NUM - 1);
+ return;
+ }
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ reg_val = IGC_READ_REG(hw, IGC_DVMOLR(rx_queue_id));
+ if (on) {
+ reg_val |= IGC_DVMOLR_STRVLAN;
+ rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ } else {
+ /* also clear HIDVLAN so the tag is neither stripped nor hidden */
+ reg_val &= ~(IGC_DVMOLR_STRVLAN | IGC_DVMOLR_HIDVLAN);
+ rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
+
+ IGC_WRITE_REG(hw, IGC_DVMOLR(rx_queue_id), reg_val);
+}