/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
(uint64_t) ((mb)->buf_physaddr + \
(uint64_t) ((char *)((mb)->pkt.data) - \
- (char *)(mb)->buf_addr))
+ (char *)(mb)->buf_addr))
#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
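
The two macros above convert an mbuf's virtual data pointer into the bus address the NIC's DMA engine consumes: the buffer's physical address plus the offset of pkt.data within the buffer (or the fixed headroom, for a freshly reset mbuf). Below is a minimal standalone sketch of the same arithmetic; struct toy_mbuf and its fields are hypothetical stand-ins for the parts of struct rte_mbuf the macros touch, not the real definition.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for struct rte_mbuf: only the fields the
 * DMA-address macros use are modeled. */
struct toy_mbuf {
        uint64_t buf_physaddr;  /* physical address of buf_addr */
        void *buf_addr;         /* virtual address of the buffer */
        void *data;             /* current start of packet data */
};

#define TOY_HEADROOM 128        /* stands in for RTE_PKTMBUF_HEADROOM */

/* Same arithmetic as RTE_MBUF_DATA_DMA_ADDR: physical base plus the
 * virtual offset of the data pointer inside the buffer. */
static uint64_t
toy_data_dma_addr(const struct toy_mbuf *mb)
{
        return mb->buf_physaddr +
            (uint64_t)((const char *)mb->data - (const char *)mb->buf_addr);
}

int
main(void)
{
        char buf[2048];
        struct toy_mbuf mb = {
                .buf_physaddr = 0x100000,
                .buf_addr = buf,
                .data = buf + TOY_HEADROOM,
        };
        /* Prints 0x100080: the base address plus the 128-byte headroom. */
        printf("dma addr = 0x%llx\n",
            (unsigned long long)toy_data_dma_addr(&mb));
        return 0;
}
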
volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
uint32_t txd_type; /**< Device-specific TXD type */
uint16_t nb_tx_desc; /**< number of TX descriptors. */
- uint16_t tx_tail; /**< Current value of TDT register. */
+ uint16_t tx_tail; /**< Current value of TDT register. */
uint16_t tx_head;
/**< Index of first used TX descriptor. */
uint16_t queue_id; /**< TX queue index. */
}
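
The cached tx_tail field shadows the hardware TDT (transmit descriptor tail) register: software fills descriptors, advances its cached tail with wraparound, then publishes the new value through tdt_reg_addr so the NIC starts fetching. A minimal sketch of that tail-advance pattern follows; struct toy_txq and its helper are illustrative, not the driver's actual code.

#include <stdint.h>
#include <stdio.h>

struct toy_txq {
        volatile uint32_t *tdt_reg_addr; /* MMIO address of TDT */
        uint16_t nb_tx_desc;             /* ring size */
        uint16_t tx_tail;                /* cached TDT value */
};

/* Advance the software tail by 'n' filled descriptors, wrapping at the
 * end of the ring, then publish it to hardware with a single register
 * write (one doorbell per burst, not one per packet). */
static void
toy_tx_doorbell(struct toy_txq *txq, uint16_t n)
{
        uint16_t tail = (uint16_t)(txq->tx_tail + n);

        if (tail >= txq->nb_tx_desc)
                tail = (uint16_t)(tail - txq->nb_tx_desc);
        txq->tx_tail = tail;
        *txq->tdt_reg_addr = tail;       /* volatile write reaches the NIC */
}

int
main(void)
{
        static uint32_t fake_tdt;        /* stands in for the MMIO register */
        struct toy_txq txq = { &fake_tdt, 512, 510 };

        toy_tx_doorbell(&txq, 4);        /* wraps: tail becomes 2 */
        printf("tail = %u\n", txq.tx_tail);
        return 0;
}
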
uint16_t
-eth_igb_xmit_pkts(struct igb_tx_queue *txq, struct rte_mbuf **tx_pkts,
+eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
+ struct igb_tx_queue *txq;
struct igb_tx_entry *sw_ring;
struct igb_tx_entry *txe, *txn;
volatile union e1000_adv_tx_desc *txr;
uint16_t tx_last;
uint16_t nb_tx;
uint16_t tx_ol_req;
- uint32_t new_ctx;
- uint32_t ctx;
+ uint32_t new_ctx = 0;
+ uint32_t ctx = 0;
uint32_t vlan_macip_lens;
+ txq = tx_queue;
sw_ring = txq->sw_ring;
txr = txq->tx_ring;
tx_id = txq->tx_tail;
ol_flags = tx_pkt->ol_flags;
vlan_macip_lens = tx_pkt->pkt.vlan_macip.data;
- tx_ol_req = (ol_flags & PKT_TX_OFFLOAD_MASK);
+ tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
/* Check whether a TX context descriptor needs to be built. */
if (tx_ol_req) {
0, 0, 0, 0,
};
- pkt_flags = (uint16_t) (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
+ pkt_flags = (uint16_t)((hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
- ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
+ ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
#else
- pkt_flags = (uint16_t) (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
- ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
+ pkt_flags = (uint16_t)((hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
+ ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
#endif
- return pkt_flags | (uint16_t) (((hl_tp_rs & 0x0F) == 0) ? 0 :
- PKT_RX_RSS_HASH);
+ return (uint16_t)(pkt_flags | (((hl_tp_rs & 0x0F) == 0) ?
+ 0 : PKT_RX_RSS_HASH));
}
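
Many of the hunks above and below exist only to add explicit (uint16_t) casts. After integer promotion, expressions such as pkt_flags | PKT_RX_RSS_HASH have type int, and assigning them back to a uint16_t draws warnings under stricter compiler flags (for example gcc's -Wconversion); the casts appear intended to document the narrowing and silence those warnings. A compact illustration of the pattern, with hypothetical flag names:

#include <stdint.h>

#define TOY_FLAG_VLAN 0x0001
#define TOY_FLAG_RSS  0x0002

/* Without the cast, 'flags | TOY_FLAG_RSS' is promoted to int, and
 * returning it as uint16_t triggers -Wconversion warnings on gcc.
 * The explicit (uint16_t) cast marks the narrowing as intentional. */
static uint16_t
toy_merge_flags(uint16_t flags, uint32_t rss_field)
{
        return (uint16_t)(flags | (((rss_field & 0x0F) == 0) ?
            0 : TOY_FLAG_RSS));
}

int
main(void)
{
        /* VLAN flag plus a non-zero RSS field yields 0x3. */
        return toy_merge_flags(TOY_FLAG_VLAN, 0x5) == 0x3 ? 0 : 1;
}
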
static inline uint16_t
uint16_t pkt_flags;
/* Check if VLAN present */
- pkt_flags = (uint16_t) (rx_status & E1000_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0;
+ pkt_flags = (uint16_t)((rx_status & E1000_RXD_STAT_VP) ?
+ PKT_RX_VLAN_PKT : 0);
#if defined(RTE_LIBRTE_IEEE1588)
if (rx_status & E1000_RXD_STAT_TMST)
- pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
+ pkt_flags = (uint16_t)(pkt_flags | PKT_RX_IEEE1588_TMST);
#endif
return pkt_flags;
}
}
uint16_t
-eth_igb_recv_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
+ struct igb_rx_queue *rxq;
volatile union e1000_adv_rx_desc *rx_ring;
volatile union e1000_adv_rx_desc *rxdp;
struct igb_rx_entry *sw_ring;
nb_rx = 0;
nb_hold = 0;
+ rxq = rx_queue;
rx_id = rxq->rx_tail;
rx_ring = rxq->rx_ring;
sw_ring = rxq->sw_ring;
rte_le_to_cpu_16(rxd.wb.upper.vlan);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
- pkt_flags = (pkt_flags |
- rx_desc_status_to_pkt_flags(staterr));
- pkt_flags = (pkt_flags |
- rx_desc_error_to_pkt_flags(staterr));
+ pkt_flags = (uint16_t)(pkt_flags |
+ rx_desc_status_to_pkt_flags(staterr));
+ pkt_flags = (uint16_t)(pkt_flags |
+ rx_desc_error_to_pkt_flags(staterr));
rxm->ol_flags = pkt_flags;
/*
}
uint16_t
-eth_igb_recv_scattered_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
+ struct igb_rx_queue *rxq;
volatile union e1000_adv_rx_desc *rx_ring;
volatile union e1000_adv_rx_desc *rxdp;
struct igb_rx_entry *sw_ring;
nb_rx = 0;
nb_hold = 0;
+ rxq = rx_queue;
rx_id = rxq->rx_tail;
rx_ring = rxq->rx_ring;
sw_ring = rxq->sw_ring;
rte_le_to_cpu_16(rxd.wb.upper.vlan);
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
- pkt_flags = (pkt_flags | rx_desc_status_to_pkt_flags(staterr));
- pkt_flags = (pkt_flags | rx_desc_error_to_pkt_flags(staterr));
+ pkt_flags = (uint16_t)(pkt_flags |
+ rx_desc_status_to_pkt_flags(staterr));
+ pkt_flags = (uint16_t)(pkt_flags |
+ rx_desc_error_to_pkt_flags(staterr));
first_seg->ol_flags = pkt_flags;
/* Prefetch data of first segment, if configured to do so. */
if (mz)
return mz;
- return rte_memzone_reserve_aligned(z_name, (uint64_t)ring_size,
+ return rte_memzone_reserve_aligned(z_name, ring_size,
socket_id, 0, IGB_ALIGN);
}
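
Descriptor rings live in named memzones, and this helper first checks for an existing zone of the same name so that a ring already reserved under that name (for example after a reconfiguration) is reused rather than reserved twice; the dropped (uint64_t) cast simply passes ring_size in the allocator's native size type. A toy model of the lookup-or-reserve pattern, with a fixed table standing in for the memzone allocator (all names hypothetical):

#include <stdio.h>
#include <string.h>

/* Toy model: a fixed table of named regions stands in for the
 * memzone allocator. */
struct toy_zone { char name[32]; size_t len; };
static struct toy_zone zones[8];
static int nb_zones;

static struct toy_zone *
toy_zone_reserve(const char *name, size_t len)
{
        int i;

        for (i = 0; i < nb_zones; i++)  /* reuse an existing zone */
                if (strcmp(zones[i].name, name) == 0)
                        return &zones[i];
        if (nb_zones == 8)
                return NULL;            /* table full */
        snprintf(zones[nb_zones].name, sizeof(zones[nb_zones].name),
            "%s", name);
        zones[nb_zones].len = len;
        return &zones[nb_zones++];
}

int
main(void)
{
        struct toy_zone *a = toy_zone_reserve("igb_tx_ring_0", 4096);
        struct toy_zone *b = toy_zone_reserve("igb_tx_ring_0", 4096);

        printf("%s\n", a == b ? "reused" : "bug"); /* prints "reused" */
        return 0;
}
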
static void
igb_tx_queue_release(struct igb_tx_queue *txq)
{
- igb_tx_queue_release_mbufs(txq);
- rte_free(txq->sw_ring);
- rte_free(txq);
+ if (txq != NULL) {
+ igb_tx_queue_release_mbufs(txq);
+ rte_free(txq->sw_ring);
+ rte_free(txq);
+ }
}
-int
-igb_dev_tx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues)
+void
+eth_igb_tx_queue_release(void *txq)
{
- uint16_t i, old_nb_queues = dev->data->nb_tx_queues;
- struct igb_tx_queue **txq;
-
- if (dev->data->tx_queues == NULL) {
- dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
- sizeof(struct igb_tx_queue *) * nb_queues,
- CACHE_LINE_SIZE);
- if (dev->data->tx_queues == NULL) {
- dev->data->nb_tx_queues = 0;
- return -ENOMEM;
- }
- } else {
- if (nb_queues < old_nb_queues)
- for (i = nb_queues; i < old_nb_queues; i++)
- igb_tx_queue_release(dev->data->tx_queues[i]);
-
- if (nb_queues != old_nb_queues) {
- txq = rte_realloc(dev->data->tx_queues,
- sizeof(struct igb_tx_queue *) * nb_queues,
- CACHE_LINE_SIZE);
- if (txq == NULL)
- return -ENOMEM;
- else
- dev->data->tx_queues = txq;
- if (nb_queues > old_nb_queues)
- memset(&(txq[old_nb_queues]), 0,
- sizeof(struct igb_tx_queue *) *
- (nb_queues - old_nb_queues));
- }
- }
- dev->data->nb_tx_queues = nb_queues;
-
- return 0;
+ igb_tx_queue_release(txq);
}
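
The exported wrapper takes void * so it can be plugged into a generic per-device callback table, and the internal release is now NULL-safe so the wrapper may be invoked on queue slots that were never configured. A miniature sketch of that callback arrangement follows; struct toy_dev_ops is a hypothetical stand-in for the real ops structure, not its actual layout.

#include <stdlib.h>

/* Hypothetical miniature of an ethdev-style ops table: the generic
 * layer stores queues as void * and calls a driver release hook. */
struct toy_dev_ops {
        void (*tx_queue_release)(void *txq);
};

struct toy_txq { int id; };

static void
toy_igb_tx_queue_release(void *txq)      /* driver hook, NULL-safe */
{
        free(txq);                       /* free(NULL) is a no-op */
}

static const struct toy_dev_ops toy_ops = {
        .tx_queue_release = toy_igb_tx_queue_release,
};

int
main(void)
{
        void *q = malloc(sizeof(struct toy_txq));

        toy_ops.tx_queue_release(q);     /* generic layer calls the hook */
        toy_ops.tx_queue_release(NULL);  /* safe on unconfigured slots */
        return 0;
}
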
static void
}
/* Initialize ring entries */
- prev = txq->nb_tx_desc - 1;
+ prev = (uint16_t)(txq->nb_tx_desc - 1);
for (i = 0; i < txq->nb_tx_desc; i++) {
volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
- size = sizeof(union e1000_adv_tx_desc) * nb_desc;
-
/* Allocate software ring */
txq->sw_ring = rte_zmalloc("txq->sw_ring",
sizeof(struct igb_tx_entry) * nb_desc,
static void
igb_rx_queue_release(struct igb_rx_queue *rxq)
{
- igb_rx_queue_release_mbufs(rxq);
- rte_free(rxq->sw_ring);
- rte_free(rxq);
+ if (rxq != NULL) {
+ igb_rx_queue_release_mbufs(rxq);
+ rte_free(rxq->sw_ring);
+ rte_free(rxq);
+ }
}
-int
-igb_dev_rx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues)
+void
+eth_igb_rx_queue_release(void *rxq)
{
- uint16_t i, old_nb_queues = dev->data->nb_rx_queues;
- struct igb_rx_queue **rxq;
-
- if (dev->data->rx_queues == NULL) {
- dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
- sizeof(struct igb_rx_queue *) * nb_queues,
- CACHE_LINE_SIZE);
- if (dev->data->rx_queues == NULL) {
- dev->data->nb_rx_queues = 0;
- return -ENOMEM;
- }
- } else {
- for (i = nb_queues; i < old_nb_queues; i++) {
- igb_rx_queue_release(dev->data->rx_queues[i]);
- dev->data->rx_queues[i] = NULL;
- }
- if (nb_queues != old_nb_queues) {
- rxq = rte_realloc(dev->data->rx_queues,
- sizeof(struct igb_rx_queue *) * nb_queues,
- CACHE_LINE_SIZE);
- if (rxq == NULL)
- return -ENOMEM;
- else
- dev->data->rx_queues = rxq;
- if (nb_queues > old_nb_queues)
- memset(&(rxq[old_nb_queues]), 0,
- sizeof(struct igb_rx_queue *) *
- (nb_queues - old_nb_queues));
- }
- }
- dev->data->nb_rx_queues = nb_queues;
-
- return 0;
+ igb_rx_queue_release(rxq);
}
static void
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
- igb_tx_queue_release_mbufs(txq);
- igb_reset_tx_queue(txq, dev);
+ if (txq != NULL) {
+ igb_tx_queue_release_mbufs(txq);
+ igb_reset_tx_queue(txq, dev);
+ }
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- igb_rx_queue_release_mbufs(rxq);
- igb_reset_rx_queue(rxq);
+ if (rxq != NULL) {
+ igb_rx_queue_release_mbufs(rxq);
+ igb_reset_rx_queue(rxq);
+ }
}
}
/* Allocate buffers for descriptor rings and set up queue */
ret = igb_alloc_rx_queue_mbufs(rxq);
- if (ret) {
- igb_dev_clear_queues(dev);
+ if (ret)
return ret;
- }
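
With teardown centralized, a failed mbuf allocation now just propagates its error code; clearing every queue is left to the caller, and the clear loops above skip NULL slots so a partially configured device is handled safely. A sketch of that division of labor (all names hypothetical):

#include <stdio.h>
#include <stdlib.h>

#define TOY_NB_QUEUES 4

static void *toy_queues[TOY_NB_QUEUES];

/* Setup may fail partway through; it only reports the error. */
static int
toy_queue_setup(int i)
{
        if (i == 2)
                return -1;               /* simulate allocation failure */
        toy_queues[i] = malloc(16);
        return 0;
}

/* Centralized cleanup: tolerate slots that were never configured. */
static void
toy_clear_queues(void)
{
        int i;

        for (i = 0; i < TOY_NB_QUEUES; i++) {
                if (toy_queues[i] != NULL) {
                        free(toy_queues[i]);
                        toy_queues[i] = NULL;
                }
        }
}

int
main(void)
{
        int i;

        for (i = 0; i < TOY_NB_QUEUES; i++) {
                if (toy_queue_setup(i) != 0) {
                        toy_clear_queues(); /* caller owns the cleanup */
                        puts("setup failed, queues cleared");
                        return 1;
                }
        }
        toy_clear_queues();
        return 0;
}
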
/*
* Reset crc_len in case it was changed after queue setup by a
E1000_SRRCTL_BSIZEPKT_MASK) <<
E1000_SRRCTL_BSIZEPKT_SHIFT);
- if (dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size){
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len + VLAN_TAG_SIZE
+ > buf_size) {
dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
dev->data->scattered_rx = 1;
}
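
The receive path must switch to scattered mode whenever a maximally sized frame cannot fit in one buffer, and a frame of max_rx_pkt_len bytes may additionally carry a 4-byte 802.1Q VLAN tag; hence the threshold now adds VLAN_TAG_SIZE before comparing against the per-buffer size. A worked check of that condition (buffer and frame sizes are illustrative):

#include <stdint.h>
#include <stdio.h>

#define TOY_VLAN_TAG_SIZE 4     /* 802.1Q tag: TPID + TCI */

/* Scatter is needed when a maximally sized frame, possibly carrying a
 * VLAN tag, cannot fit in a single receive buffer. */
static int
toy_needs_scatter(uint32_t max_rx_pkt_len, uint32_t buf_size)
{
        return max_rx_pkt_len + TOY_VLAN_TAG_SIZE > buf_size;
}

int
main(void)
{
        /* 2048-byte buffers, 2046-byte max frame: without the VLAN
         * allowance this would wrongly stay in single-buffer mode. */
        printf("%d\n", toy_needs_scatter(2046, 2048)); /* prints 1 */
        return 0;
}
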
if (dev->data->dev_conf.rxmode.hw_strip_crc) {
rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
- /* set STRCRC bit in all queues for Powerville */
- if (hw->mac.type == e1000_i350) {
+ /* set STRCRC bit in all queues for Powerville/Springville */
+ if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i210) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
uint32_t dvmolr = E1000_READ_REG(hw,
E1000_DVMOLR(i));
} else {
rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
- /* clear STRCRC bit in all queues for Powerville */
- if (hw->mac.type == e1000_i350) {
+ /* clear STRCRC bit in all queues for Powerville/Springville */
+ if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i210) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
uint32_t dvmolr = E1000_READ_REG(hw,
E1000_DVMOLR(i));