uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
uint32_t flags; /**< RX flags. */
uint64_t offloads; /**< offloads of DEV_RX_OFFLOAD_* */
+ const struct rte_memzone *mz; /**< memzone with the RX descriptor ring. */
};
struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
/**< Hardware context history.*/
uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+ const struct rte_memzone *mz; /**< memzone with the TX descriptor ring. */
};
if (txq != NULL) {
igb_tx_queue_release_mbufs(txq);
rte_free(txq->sw_ring);
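+ /* Free the descriptor ring memzone together with the queue. */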
+ rte_memzone_free(txq->mz);
rte_free(txq);
}
}
void
-eth_igb_tx_queue_release(void *txq)
+eth_igb_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- igb_tx_queue_release(txq);
+ igb_tx_queue_release(dev->data->tx_queues[qid]);
}
static int
return -ENOMEM;
}
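+ /* Keep the memzone handle so igb_tx_queue_release() can free the ring. */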
+ txq->mz = tz;
txq->nb_tx_desc = nb_desc;
txq->pthresh = tx_conf->tx_thresh.pthresh;
txq->hthresh = tx_conf->tx_thresh.hthresh;
if (rxq != NULL) {
igb_rx_queue_release_mbufs(rxq);
rte_free(rxq->sw_ring);
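+ /* Free the RX descriptor ring memzone along with the queue. */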
+ rte_memzone_free(rxq->mz);
rte_free(rxq);
}
}
void
-eth_igb_rx_queue_release(void *rxq)
+eth_igb_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- igb_rx_queue_release(rxq);
+ igb_rx_queue_release(dev->data->rx_queues[qid]);
}
static void
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_RSS_HASH;
igb_rx_queue_release(rxq);
return -ENOMEM;
}
+
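+ /* Keep the memzone handle so igb_rx_queue_release() can free the ring. */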
+ rxq->mz = rz;
rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
rxq->rx_ring_phys_addr = rz->iova;
}
uint32_t
-eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+eth_igb_rx_queue_count(void *rx_queue)
{
#define IGB_RXQ_SCAN_INTERVAL 4
volatile union e1000_adv_rx_desc *rxdp;
struct igb_rx_queue *rxq;
uint32_t desc = 0;
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = rx_queue;
rxdp = &(rxq->rx_ring[rxq->rx_tail]);
while ((desc < rxq->nb_rx_desc) &&
return desc;
}
-int
-eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
-{
- volatile union e1000_adv_rx_desc *rxdp;
- struct igb_rx_queue *rxq = rx_queue;
- uint32_t desc;
-
- if (unlikely(offset >= rxq->nb_rx_desc))
- return 0;
- desc = rxq->rx_tail + offset;
- if (desc >= rxq->nb_rx_desc)
- desc -= rxq->nb_rx_desc;
-
- rxdp = &rxq->rx_ring[desc];
- return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
-}
-
int
eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
uint16_t i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- eth_igb_rx_queue_release(dev->data->rx_queues[i]);
+ eth_igb_rx_queue_release(dev, i);
dev->data->rx_queues[i] = NULL;
- rte_eth_dma_zone_free(dev, "rx_ring", i);
}
dev->data->nb_rx_queues = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- eth_igb_tx_queue_release(dev->data->tx_queues[i]);
+ eth_igb_tx_queue_release(dev, i);
dev->data->tx_queues[i] = NULL;
- rte_eth_dma_zone_free(dev, "tx_ring", i);
}
dev->data->nb_tx_queues = 0;
}
uint32_t srrctl;
uint16_t buf_size;
uint16_t rctl_bsize;
+ uint32_t max_len;
uint16_t i;
int ret;
/*
* Configure support of jumbo frames, if any.
*/
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
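+ /* Frame length the hardware must accept: MTU plus L2 overhead. */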
+ max_len = dev->data->mtu + E1000_ETH_OVERHEAD;
+ if (dev->data->mtu > RTE_ETHER_MTU) {
rctl |= E1000_RCTL_LPE;
/*
* Set maximum packet length by default, and might be updated
* together with enabling/disabling dual VLAN.
*/
- E1000_WRITE_REG(hw, E1000_RLPML,
- dev->data->dev_conf.rxmode.max_rx_pkt_len +
- VLAN_TAG_SIZE);
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ max_len += VLAN_TAG_SIZE;
+
+ E1000_WRITE_REG(hw, E1000_RLPML, max_len);
} else
rctl &= ~E1000_RCTL_LPE;
E1000_SRRCTL_BSIZEPKT_SHIFT);
/* It adds dual VLAN length for supporting dual VLAN */
- if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
- 2 * VLAN_TAG_SIZE) > buf_size){
+ if ((max_len + 2 * VLAN_TAG_SIZE) > buf_size) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG,
"forcing scatter mode");
uint32_t srrctl;
uint16_t buf_size;
uint16_t rctl_bsize;
+ uint32_t max_len;
uint16_t i;
int ret;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* setup MTU */
- e1000_rlpml_set_vf(hw,
- (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
- VLAN_TAG_SIZE));
+ max_len = dev->data->mtu + E1000_ETH_OVERHEAD;
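+ /* Reserve room for one extra VLAN tag beyond the MTU-derived length. */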
+ e1000_rlpml_set_vf(hw, (uint16_t)(max_len + VLAN_TAG_SIZE));
/* Configure and enable each RX queue. */
rctl_bsize = 0;
E1000_SRRCTL_BSIZEPKT_SHIFT);
/* It adds dual VLAN length for supporting dual VLAN */
- if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
- 2 * VLAN_TAG_SIZE) > buf_size){
+ if ((max_len + 2 * VLAN_TAG_SIZE) > buf_size) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG,
"forcing scatter mode");