/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/**
* Structure to check whether a new context descriptor needs to be built
*/
+
struct ixgbe_advctx_info {
uint16_t flags; /**< ol_flags for context build. */
uint32_t cmp_mask; /**< compare mask for vlan_macip_lens. */
- uint32_t vlan_macip_lens; /**< vlan, mac ip length. */
+ union rte_vlan_macip vlan_macip_lens; /**< vlan, mac ip length. */
};
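
For reference on the switch from an open-coded 32-bit word to the union access used below (`vlan_macip_lens.data`), here is a minimal standalone sketch of how such a packed vlan/MAC/IP-length word can be laid out. The field widths are inferred from the `vlan_tci << 16 | l2_len << IXGBE_ADVTXD_MACLEN_SHIFT | l3_len` packing removed further down and are assumptions, not copied from the DPDK headers; the bit-field/word equivalence also assumes the usual little-endian layout.

/* Sketch only: names and field widths are illustrative, not the driver's. */
#include <stdint.h>
#include <stdio.h>

union vlan_macip_sketch {
	uint32_t data;              /* packed word compared by the context cache */
	struct {
		uint16_t l3_len:9;  /* IP header length, bits 0-8 */
		uint16_t l2_len:7;  /* MAC header length, bits 9-15 */
		uint16_t vlan_tci;  /* VLAN tag control info, bits 16-31 */
	} f;
};

int main(void)
{
	union vlan_macip_sketch v = { .data = 0 };

	v.f.l2_len = 14;            /* Ethernet header */
	v.f.l3_len = 20;            /* IPv4 header without options */
	v.f.vlan_tci = 100;

	/* Same value the removed code built by hand:
	 * vlan_tci << 16 | l2_len << 9 | l3_len */
	printf("packed word: 0x%08x\n", (unsigned)v.data);
	return 0;
}

Accessing the word through .data keeps the context-cache comparison a single 32-bit compare while the RX/TX paths fill the individual fields.
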
/**
txq->ctx_cache[ctx_idx].flags = ol_flags;
txq->ctx_cache[ctx_idx].cmp_mask = cmp_mask;
- txq->ctx_cache[ctx_idx].vlan_macip_lens = vlan_macip_lens & cmp_mask;
+ txq->ctx_cache[ctx_idx].vlan_macip_lens.data =
+ vlan_macip_lens & cmp_mask;
ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
{
/* Check whether it matches the currently used context */
if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
- (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens ==
+ (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
(txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
return txq->ctx_curr;
}
/* Otherwise, check whether it matches the other cached context */
txq->ctx_curr ^= 1;
if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
- (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens ==
+ (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
(txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
return txq->ctx_curr;
}
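
The lookup above keeps a two-entry cache of recently used context descriptors and toggles between them with `^= 1`; when neither entry matches, the caller receives the IXGBE_CTX_NUM sentinel and programs a fresh context descriptor (that fall-through is not shown in this hunk). A self-contained model of the same pattern, with hypothetical names, assuming two cached contexts:

/* Hypothetical, standalone model of the two-entry context cache;
 * NB_CTX plays the role of the "no match" sentinel. */
#include <stdint.h>

#define NB_CTX 2

struct ctx_entry {
	uint16_t flags;
	uint32_t cmp_mask;
	uint32_t vlan_macip_lens;   /* stored already masked with cmp_mask */
};

struct ctx_cache {
	struct ctx_entry entry[NB_CTX];
	uint32_t curr;              /* index of the most recently used entry */
};

static uint32_t
ctx_lookup(struct ctx_cache *c, uint16_t flags, uint32_t vlan_macip_lens)
{
	/* First try the entry that matched last time. */
	if (c->entry[c->curr].flags == flags &&
	    c->entry[c->curr].vlan_macip_lens ==
	    (c->entry[c->curr].cmp_mask & vlan_macip_lens))
		return c->curr;

	/* Then try the other entry; ^= 1 flips between 0 and 1. */
	c->curr ^= 1;
	if (c->entry[c->curr].flags == flags &&
	    c->entry[c->curr].vlan_macip_lens ==
	    (c->entry[c->curr].cmp_mask & vlan_macip_lens))
		return c->curr;

	/* Miss: the caller must program a new context descriptor. */
	return NB_CTX;
}

Two entries mirror the two hardware context slots an advanced TX queue can hold, so the cache only has to remember which slot was written last.
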
uint16_t nb_used;
uint16_t tx_ol_req;
uint32_t vlan_macip_lens;
- uint32_t ctx;
+ uint32_t ctx = 0;
uint32_t new_ctx;
txq = tx_queue;
* are needed for offload functionality.
*/
ol_flags = tx_pkt->ol_flags;
- vlan_macip_lens = tx_pkt->pkt.vlan_tci << 16 |
- tx_pkt->pkt.l2_len << IXGBE_ADVTXD_MACLEN_SHIFT |
- tx_pkt->pkt.l3_len;
+ vlan_macip_lens = tx_pkt->pkt.vlan_macip.data;
/* If hardware offload required */
tx_ol_req = ol_flags & PKT_TX_OFFLOAD_MASK;
if (tx_ol_req) {
/* Decide whether a new context needs to be built or an existing one can be reused. */
- ctx = what_advctx_update(txq, tx_ol_req, vlan_macip_lens);
+ ctx = what_advctx_update(txq, tx_ol_req,
+ vlan_macip_lens);
/* Only allocate a context descriptor if required */
new_ctx = (ctx == IXGBE_CTX_NUM);
ctx = txq->ctx_curr;
/* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
uint16_t
ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+ uint16_t nb_pkts)
{
uint16_t nb_rx;
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
/* Only valid if PKT_RX_VLAN_PKT is set in pkt_flags */
- rxm->pkt.vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
+ rxm->pkt.vlan_macip.f.vlan_tci =
+ rte_le_to_cpu_16(rxd.wb.upper.vlan);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
pkt_flags = (pkt_flags | rx_desc_status_to_pkt_flags(staterr));
* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
* set in the pkt_flags field.
*/
- first_seg->pkt.vlan_tci =
+ first_seg->pkt.vlan_macip.f.vlan_tci =
rte_le_to_cpu_16(rxd.wb.upper.vlan);
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
if (mz)
return mz;
- return rte_memzone_reserve_aligned(z_name, (uint64_t) ring_size,
+ return rte_memzone_reserve_aligned(z_name, ring_size,
socket_id, 0, IXGBE_ALIGN);
}
ixgbe_tx_queue_release(struct igb_tx_queue *txq)
{
if (txq != NULL) {
- ixgbe_tx_queue_release_mbufs(txq);
- rte_free(txq->sw_ring);
- rte_free(txq);
+ ixgbe_tx_queue_release_mbufs(txq);
+ rte_free(txq->sw_ring);
+ rte_free(txq);
}
}
RTE_LOG(ERR, PMD,
"tx_rs_thresh must be less than the "
"number of TX descriptors minus 2. "
- "(tx_rs_thresh=%u port=%d queue=%d)",
+ "(tx_rs_thresh=%u port=%d queue=%d)\n",
tx_rs_thresh, dev->data->port_id, queue_idx);
return -(EINVAL);
}
"tx_rs_thresh must be less than the "
"tx_free_thresh must be less than the "
"number of TX descriptors minus 3. "
- "(tx_free_thresh=%u port=%d queue=%d)",
+ "(tx_free_thresh=%u port=%d queue=%d)\n",
tx_free_thresh, dev->data->port_id, queue_idx);
return -(EINVAL);
}
*/
if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
RTE_LOG(ERR, PMD,
- "TX WTHRESH should be set to 0 if "
+ "TX WTHRESH must be set to 0 if "
"tx_rs_thresh is greater than 1. "
- "TX WTHRESH will be set to 0. "
- "(tx_rs_thresh=%u port=%d queue=%d)",
+ "(tx_rs_thresh=%u port=%d queue=%d)\n",
tx_rs_thresh,
dev->data->port_id, queue_idx);
return -(EINVAL);
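
Because a non-zero WTHRESH combined with tx_rs_thresh > 1 is now rejected outright instead of being silently overridden, the TX queue configuration has to satisfy every check up front. A hedged example of a consistent set of values (field names follow the rte_eth_txconf layout of this DPDK generation; the numbers are purely illustrative):

/* Illustrative only: the nb_desc passed to the setup call must also keep
 * tx_rs_thresh < nb_desc - 2 and tx_free_thresh < nb_desc - 3. */
#include <rte_ethdev.h>

static const struct rte_eth_txconf tx_conf_example = {
	.tx_thresh = {
		.pthresh = 32,
		.hthresh = 0,
		.wthresh = 0,   /* must stay 0 because tx_rs_thresh > 1 */
	},
	.tx_rs_thresh = 32,
	.tx_free_thresh = 32,
};

/* Usage sketch: rte_eth_tx_queue_setup(port_id, queue_idx, 512,
 *               socket_id, &tx_conf_example);  (512 being nb_desc) */
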
(txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST))
dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
else
- dev->tx_pkt_burst = ixgbe_xmit_pkts;
+ dev->tx_pkt_burst = ixgbe_xmit_pkts;
return (0);
}
static void
ixgbe_rx_queue_release(struct igb_rx_queue *rxq)
{
- ixgbe_rx_queue_release_mbufs(rxq);
- rte_free(rxq->sw_ring);
- rte_free(rxq);
+ if (rxq != NULL) {
+ ixgbe_rx_queue_release_mbufs(rxq);
+ rte_free(rxq->sw_ring);
+ rte_free(rxq);
+ }
}
void
return ret;
}
-/* (Re)set dynamic igb_rx_queue fields to defaults */
+/* Reset dynamic igb_rx_queue fields back to defaults */
static void
ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
{
unsigned i;
+ uint16_t len;
/*
* By default, the Rx queue setup function allocates enough memory for
const struct rte_memzone *rz;
struct igb_rx_queue *rxq;
struct ixgbe_hw *hw;
+ int use_def_burst_func = 1;
+ uint16_t len;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
rxq->port_id = dev->data->port_id;
rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
ETHER_CRC_LEN);
+ rxq->drop_en = rx_conf->rx_drop_en;
/*
- * Allocate TX ring hardware descriptors. A memzone large enough to
+ * Allocate RX ring hardware descriptors. A memzone large enough to
* handle the maximum ring size is allocated in order to allow for
* resizing in later calls to the queue setup function.
*/
len = nb_desc;
#endif
rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
- sizeof(struct igb_rx_entry) * nb_desc,
+ sizeof(struct igb_rx_entry) * len,
CACHE_LINE_SIZE);
if (rxq->sw_ring == NULL) {
ixgbe_rx_queue_release(rxq);
{
unsigned i;
+ PMD_INIT_FUNC_TRACE();
for (i = 0; i < dev->data->nb_tx_queues; i++) {
struct igb_tx_queue *txq = dev->data->tx_queues[i];
- ixgbe_tx_queue_release_mbufs(txq);
- ixgbe_reset_tx_queue(txq);
+ if (txq != NULL) {
+ ixgbe_tx_queue_release_mbufs(txq);
+ ixgbe_reset_tx_queue(txq);
+ }
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct igb_rx_queue *rxq = dev->data->rx_queues[i];
- ixgbe_rx_queue_release_mbufs(rxq);
- ixgbe_reset_rx_queue(rxq);
+ if (rxq != NULL) {
+ ixgbe_rx_queue_release_mbufs(rxq);
+ ixgbe_reset_rx_queue(rxq);
+ }
}
}
(unsigned) rxq->queue_id);
return (-ENOMEM);
}
+
+ rte_mbuf_refcnt_set(mbuf, 1);
+ mbuf->type = RTE_MBUF_PKT;
+ mbuf->pkt.next = NULL;
+ mbuf->pkt.data = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
+ mbuf->pkt.nb_segs = 1;
+ mbuf->pkt.in_port = rxq->port_id;
+
dma_addr =
rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
rxd = &rxq->rx_ring[i];
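
The explicit mbuf initialization added above resets every field the RX path later relies on; mempool buffers are recycled, so refcnt, next and nb_segs may carry stale values from a previous use. The descriptor's DMA address then points just past the reserved headroom. A minimal sketch of that address calculation (the body shown is an assumption about what RTE_MBUF_DATA_DMA_ADDR_DEFAULT resolves to, not a quote from the driver):

/* Hypothetical helper mirroring the default RX DMA address:
 * physical start of the buffer plus the headroom kept for prepends. */
#include <stdint.h>

#define PKTMBUF_HEADROOM 128    /* RTE_PKTMBUF_HEADROOM is build-configurable; 128 is the common default */

static inline uint64_t
rx_dma_addr_default(uint64_t buf_physaddr)
{
	return buf_physaddr + PKTMBUF_HEADROOM;
}
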
#endif
srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ /* Drop packets when no RX descriptors are available, if enabled for the queue */
+ if (rxq->drop_en)
+ srrctl |= IXGBE_SRRCTL_DROP_EN;
+
/*
* Configure the RX buffer size in the BSIZEPACKET field of
* the SRRCTL register of the queue.
buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
IXGBE_SRRCTL_BSIZEPKT_SHIFT);
- if (dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size){
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ IXGBE_RX_BUF_THRESHOLD > buf_size){
dev->data->scattered_rx = 1;
dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
}
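
The new IXGBE_RX_BUF_THRESHOLD margin makes the driver fall back to the scattered receive path slightly earlier, so frames whose length is close to the per-buffer size no longer risk overflowing a single buffer. A standalone model of the decision (the constant values here are assumptions for illustration; only the 1 KB BSIZEPACKET granularity is taken from the SRRCTL layout):

#include <stdint.h>
#include <stdio.h>

#define BSIZEPKT_SHIFT   10     /* SRRCTL.BSIZEPACKET counts 1 KB units */
#define RX_BUF_THRESHOLD 32     /* safety margin; the actual driver value may differ */

int main(void)
{
	uint32_t bsizepkt = 2;                                  /* 2 -> 2 KB RX buffers */
	uint16_t buf_size = (uint16_t)(bsizepkt << BSIZEPKT_SHIFT);
	uint32_t max_rx_pkt_len = 2040;

	/* Without the margin this frame would be declared "fits in one
	 * buffer"; with it, the scattered path is selected instead. */
	int scattered = (max_rx_pkt_len + RX_BUF_THRESHOLD) > buf_size;
	printf("buf_size=%u scattered_rx=%d\n", buf_size, scattered);
	return 0;
}
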
case ixgbe_mac_82598EB:
txctrl = IXGBE_READ_REG(hw,
IXGBE_DCA_TXCTRL(i));
- txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i),
txctrl);
break;
default:
txctrl = IXGBE_READ_REG(hw,
IXGBE_DCA_TXCTRL_82599(i));
- txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i),
txctrl);
break;
/* Allocate buffers for descriptor rings */
ret = ixgbe_alloc_rx_queue_mbufs(rxq);
- if (ret){
- return -1;
- }
+ if (ret)
+ return ret;
+
/* Setup the Base and Length of the Rx Descriptor Rings */
bus_addr = rxq->rx_ring_phys_addr;
#endif
srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ /* Drop packets when no RX descriptors are available, if enabled for the queue */
+ if (rxq->drop_en)
+ srrctl |= IXGBE_SRRCTL_DROP_EN;
+
/*
* Configure the RX buffer size in the BSIZEPACKET field of
* the SRRCTL register of the queue.
dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
}
}
+
return 0;
}
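
Both the PF and VF RX init paths above now honor the per-queue drop flag, which applications request through rte_eth_rxconf at queue setup time. An illustrative configuration (field names follow the rte_eth_rxconf definition of this DPDK generation; the threshold values are arbitrary):

/* Illustrative only: rx_drop_en asks the queue to drop incoming packets
 * when it runs out of RX descriptors instead of back-pressuring the port. */
#include <rte_ethdev.h>

static const struct rte_eth_rxconf rx_conf_drop = {
	.rx_thresh = {
		.pthresh = 8,
		.hthresh = 8,
		.wthresh = 4,
	},
	.rx_free_thresh = 32,
	.rx_drop_en = 1,
};

/* Usage sketch: rte_eth_rx_queue_setup(port_id, queue_idx, 128, socket_id,
 *               &rx_conf_drop, mbuf_pool); */
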
*/
txctrl = IXGBE_READ_REG(hw,
IXGBE_VFDCA_TXCTRL(i));
- txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i),
txctrl);
}