mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
tx_offload_mask.l2_len |= ~0;
tx_offload_mask.l3_len |= ~0;
- tx_offload_mask.l4_len |= ~0;
break;
case PKT_TX_SCTP_CKSUM:
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
struct ixgbe_tx_entry *sw_ring;
struct ixgbe_tx_entry *txe, *txn;
volatile union ixgbe_adv_tx_desc *txr;
- volatile union ixgbe_adv_tx_desc *txd;
+ volatile union ixgbe_adv_tx_desc *txd, *txp;
struct rte_mbuf *tx_pkt;
struct rte_mbuf *m_seg;
uint64_t buf_dma_addr;
txr = txq->tx_ring;
tx_id = txq->tx_tail;
txe = &sw_ring[tx_id];
+ txp = NULL;
/* Determine if the descriptor ring needs to be cleaned. */
if (txq->nb_tx_free < txq->tx_free_thresh)
*/
nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
+ if (txp != NULL &&
+ nb_used + txq->nb_tx_used >= txq->tx_rs_thresh)
+ /* set RS on the previous packet in the burst */
+ txp->read.cmd_type_len |=
+ rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
+
/*
* The number of descriptors that must be allocated for a
* packet is the number of segments of that packet, plus 1
/* Update txq RS bit counters */
txq->nb_tx_used = 0;
- }
+ txp = NULL;
+ } else
+ txp = txd;
+
txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
}
+
end_of_tx:
+ /* set RS on last packet in the burst */
+ if (txp != NULL)
+ txp->read.cmd_type_len |= rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
+
rte_wmb();
/*
*
**********************************************************************/
-/*
- * Rings setup and release.
- *
- * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
- * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will
- * also optimize cache line size effect. H/W supports up to cache line size 128.
- */
-#define IXGBE_ALIGN 128
-
-/*
- * Maximum number of Ring Descriptors.
- *
- * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
- * descriptors should meet the following condition:
- * (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
- */
-#define IXGBE_MIN_RING_DESC 32
-#define IXGBE_MAX_RING_DESC 4096
-
/*
* Create memzone for HW rings. malloc can't be used as the physical address is
* needed. If the memzone is already created, then this function returns a ptr
* It must not exceed hardware maximum, and must be multiple
* of IXGBE_ALIGN.
*/
- if (((nb_desc * sizeof(union ixgbe_adv_tx_desc)) % IXGBE_ALIGN) != 0 ||
- (nb_desc > IXGBE_MAX_RING_DESC) ||
- (nb_desc < IXGBE_MIN_RING_DESC)) {
+ if (nb_desc % IXGBE_TXD_ALIGN != 0 ||
+ (nb_desc > IXGBE_MAX_RING_DESC) ||
+ (nb_desc < IXGBE_MIN_RING_DESC)) {
return -EINVAL;
}
tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
if (tx_rs_thresh >= (nb_desc - 2)) {
PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
- "of TX descriptors minus 2. (tx_rs_thresh=%u "
- "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
- (int)dev->data->port_id, (int)queue_idx);
+ "of TX descriptors minus 2. (tx_rs_thresh=%u "
+ "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id, (int)queue_idx);
+ return -(EINVAL);
+ }
+ if (tx_rs_thresh > DEFAULT_TX_RS_THRESH) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh must be less or equal than %u. "
+ "(tx_rs_thresh=%u port=%d queue=%d)",
+ DEFAULT_TX_RS_THRESH, (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id, (int)queue_idx);
return -(EINVAL);
}
if (tx_free_thresh >= (nb_desc - 3)) {
* It must not exceed hardware maximum, and must be multiple
* of IXGBE_ALIGN.
*/
- if (((nb_desc * sizeof(union ixgbe_adv_rx_desc)) % IXGBE_ALIGN) != 0 ||
- (nb_desc > IXGBE_MAX_RING_DESC) ||
- (nb_desc < IXGBE_MIN_RING_DESC)) {
+ if (nb_desc % IXGBE_RXD_ALIGN != 0 ||
+ (nb_desc > IXGBE_MAX_RING_DESC) ||
+ (nb_desc < IXGBE_MIN_RING_DESC)) {
return (-EINVAL);
}
{
struct ixgbe_hw *hw;
uint32_t mrqc;
+ uint32_t mrqc_reg;
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
+ mrqc = IXGBE_READ_REG(hw, mrqc_reg);
mrqc &= ~IXGBE_MRQC_RSSEN;
- IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+ IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
}
static void
uint32_t rss_key;
uint64_t rss_hf;
uint16_t i;
+ uint32_t mrqc_reg;
+ uint32_t rssrk_reg;
+
+ mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
+ rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
hash_key = rss_conf->rss_key;
if (hash_key != NULL) {
rss_key |= hash_key[(i * 4) + 1] << 8;
rss_key |= hash_key[(i * 4) + 2] << 16;
rss_key |= hash_key[(i * 4) + 3] << 24;
- IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, rss_key);
+ IXGBE_WRITE_REG_ARRAY(hw, rssrk_reg, i, rss_key);
}
}
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
if (rss_hf & ETH_RSS_IPV6_UDP_EX)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
- IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+ IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
}
int
struct ixgbe_hw *hw;
uint32_t mrqc;
uint64_t rss_hf;
+ uint32_t mrqc_reg;
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!ixgbe_rss_update_sp(hw->mac.type)) {
+ PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
+ "NIC.");
+ return -ENOTSUP;
+ }
+ mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
+
/*
* Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
* "RSS enabling cannot be done dynamically while it must be
* disabled at initialization time.
*/
rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
- mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ mrqc = IXGBE_READ_REG(hw, mrqc_reg);
if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
if (rss_hf != 0) /* Enable RSS */
return -(EINVAL);
uint32_t rss_key;
uint64_t rss_hf;
uint16_t i;
+ uint32_t mrqc_reg;
+ uint32_t rssrk_reg;
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
+ rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
hash_key = rss_conf->rss_key;
if (hash_key != NULL) {
/* Return RSS hash key */
for (i = 0; i < 10; i++) {
- rss_key = IXGBE_READ_REG_ARRAY(hw, IXGBE_RSSRK(0), i);
+ rss_key = IXGBE_READ_REG_ARRAY(hw, rssrk_reg, i);
hash_key[(i * 4)] = rss_key & 0x000000FF;
hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
}
/* Get RSS functions configured in MRQC register */
- mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ mrqc = IXGBE_READ_REG(hw, mrqc_reg);
if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
rss_conf->rss_hf = 0;
return 0;
uint32_t reta;
uint16_t i;
uint16_t j;
+ uint16_t sp_reta_size;
+ uint32_t reta_reg;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
+
/*
* Fill in redirection table
* The byte-swap is needed because NIC registers are in
* little-endian order.
*/
reta = 0;
- for (i = 0, j = 0; i < 128; i++, j++) {
+ for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
+ reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
+
if (j == dev->data->nb_rx_queues)
j = 0;
reta = (reta << 8) | j;
if ((i & 3) == 3)
- IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2),
+ IXGBE_WRITE_REG(hw, reta_reg,
rte_bswap32(reta));
}
#define NUM_VFTA_REGISTERS 128
#define NIC_RX_BUFFER_SIZE 0x200
+#define X550_RX_BUFFER_SIZE 0x180
static void
ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
* RXPBSIZE
* split rx buffer up into sections, each for 1 traffic class
*/
- pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ pbsize = (uint16_t)(X550_RX_BUFFER_SIZE / nb_tcs);
+ break;
+ default:
+ pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
+ break;
+ }
for (i = 0 ; i < nb_tcs; i++) {
uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
* mapping is done with 3 bits per priority,
* so shift by i*3 each time
*/
- queue_mapping |= ((cfg->dcb_queue[i] & 0x07) << (i * 3));
+ queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
}
/* User Priority to Traffic Class mapping */
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
- j = vmdq_rx_conf->dcb_queue[i];
+ j = vmdq_rx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
(uint8_t)(1 << j);
/* User Priority to Traffic Class mapping */
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
- j = vmdq_tx_conf->dcb_queue[i];
+ j = vmdq_tx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
(uint8_t)(1 << j);
/* User Priority to Traffic Class mapping */
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
- j = rx_conf->dcb_queue[i];
+ j = rx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
(uint8_t)(1 << j);
/* User Priority to Traffic Class mapping */
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
- j = tx_conf->dcb_queue[i];
+ j = tx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
(uint8_t)(1 << j);
reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
IXGBE_MRQC_VMDQRT4TCEN;
else {
+ /* no matter the mode is DCB or DCB_RSS, just
+ * set the MRQE to RSSXTCEN. RSS is controlled
+ * by RSS_FIELD
+ */
IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
- IXGBE_MRQC_RT4TCEN;
+ IXGBE_MRQC_RTRSS4TCEN;
}
}
if (dcb_config->num_tcs.pg_tcs == 8) {
else {
IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
- IXGBE_MRQC_RT8TCEN;
+ IXGBE_MRQC_RTRSS8TCEN;
}
}
{
int ret = 0;
uint8_t i,pfc_en,nb_tcs;
- uint16_t pbsize;
+ uint16_t pbsize, rx_buffer_size;
uint8_t config_dcb_rx = 0;
uint8_t config_dcb_tx = 0;
uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
*get dcb and VT rx configuration parameters
*from rte_eth_conf
*/
- ixgbe_vmdq_dcb_rx_config(dev,dcb_config);
+ ixgbe_vmdq_dcb_rx_config(dev, dcb_config);
/*Configure general VMDQ and DCB RX parameters*/
ixgbe_vmdq_dcb_configure(dev);
}
break;
case ETH_MQ_RX_DCB:
+ case ETH_MQ_RX_DCB_RSS:
dcb_config->vt_mode = false;
config_dcb_rx = DCB_RX_CONFIG;
/* Get dcb TX configuration parameters from rte_eth_conf */
- ixgbe_dcb_rx_config(dev,dcb_config);
+ ixgbe_dcb_rx_config(dev, dcb_config);
/*Configure general DCB RX parameters*/
ixgbe_dcb_rx_hw_config(hw, dcb_config);
break;
dcb_config->vt_mode = false;
config_dcb_tx = DCB_TX_CONFIG;
/*get DCB TX configuration parameters from rte_eth_conf*/
- ixgbe_dcb_tx_config(dev,dcb_config);
+ ixgbe_dcb_tx_config(dev, dcb_config);
/*Configure general DCB TX parameters*/
ixgbe_dcb_tx_hw_config(hw, dcb_config);
break;
}
}
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ rx_buffer_size = X550_RX_BUFFER_SIZE;
+ break;
+ default:
+ rx_buffer_size = NIC_RX_BUFFER_SIZE;
+ break;
+ }
+
if(config_dcb_rx) {
/* Set RX buffer size */
- pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
+ pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
for (i = 0 ; i < nb_tcs; i++) {
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
/* Check if the PFC is supported */
if(dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
- pbsize = (uint16_t) (NIC_RX_BUFFER_SIZE / nb_tcs);
+ pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
for (i = 0; i < nb_tcs; i++) {
/*
* If the TC count is 8,and the default high_water is 48,
/* check support mq_mode for DCB */
if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
- (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB))
+ (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
+ (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
return;
if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES)
return;
/** Configure DCB hardware **/
- ixgbe_dcb_hw_configure(dev,dcb_cfg);
+ ixgbe_dcb_hw_configure(dev, dcb_cfg);
return;
}
* any DCB/RSS w/o VMDq multi-queue setting
*/
switch (dev->data->dev_conf.rxmode.mq_mode) {
- case ETH_MQ_RX_RSS:
- ixgbe_rss_configure(dev);
- break;
+ case ETH_MQ_RX_RSS:
+ case ETH_MQ_RX_DCB_RSS:
+ case ETH_MQ_RX_VMDQ_RSS:
+ ixgbe_rss_configure(dev);
+ break;
- case ETH_MQ_RX_VMDQ_DCB:
- ixgbe_vmdq_dcb_configure(dev);
- break;
+ case ETH_MQ_RX_VMDQ_DCB:
+ ixgbe_vmdq_dcb_configure(dev);
+ break;
- case ETH_MQ_RX_VMDQ_ONLY:
- ixgbe_vmdq_rx_hw_configure(dev);
- break;
+ case ETH_MQ_RX_VMDQ_ONLY:
+ ixgbe_vmdq_rx_hw_configure(dev);
+ break;
- case ETH_MQ_RX_NONE:
- /* if mq_mode is none, disable rss mode.*/
- default: ixgbe_rss_disable(dev);
+ case ETH_MQ_RX_NONE:
+ default:
+ /* if mq_mode is none, disable rss mode.*/
+ ixgbe_rss_disable(dev);
+ break;
}
} else {
/*
*/
} else if (adapter->rx_vec_allowed) {
PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
- "burst size no less than 32.");
+ "burst size no less than %d (port=%d).",
+ RTE_IXGBE_DESCS_PER_LOOP,
+ dev->data->port_id);
dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
} else if (adapter->rx_bulk_alloc_allowed) {
rte_wmb();
IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
} else
return -1;
ixgbe_rx_queue_release_mbufs(rxq);
ixgbe_reset_rx_queue(adapter, rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
} else
return -1;
rte_wmb();
IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
} else
return -1;
txq->ops->release_mbufs(txq);
txq->ops->reset(txq);
}
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
} else
return -1;
return 0;
}
+/*
+ * Report the runtime configuration of RX queue 'queue_id' into *qinfo:
+ * the mbuf pool the queue allocates from, the port-wide scattered-RX
+ * flag, the descriptor ring size, and the rx_conf thresholds recorded
+ * when the queue was set up.
+ *
+ * NOTE(review): queue_id is not bounds-checked here -- presumably the
+ * ethdev layer validates it before dispatching; confirm at the caller.
+ */
+void
+ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_info *qinfo)
+{
+	struct ixgbe_rx_queue *rxq;
+
+	rxq = dev->data->rx_queues[queue_id];
+
+	qinfo->mp = rxq->mb_pool;
+	/* scattered_rx is tracked per port, reported for every queue. */
+	qinfo->scattered_rx = dev->data->scattered_rx;
+	qinfo->nb_desc = rxq->nb_rx_desc;
+
+	/* Thresholds captured from rx_conf at queue setup time. */
+	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+	qinfo->conf.rx_drop_en = rxq->drop_en;
+	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+/*
+ * Report the runtime configuration of TX queue 'queue_id' into *qinfo:
+ * the descriptor ring size plus the prefetch/host/writeback thresholds,
+ * free/RS thresholds, flags and deferred-start setting recorded when
+ * the queue was set up.
+ *
+ * NOTE(review): queue_id is not bounds-checked here -- presumably the
+ * ethdev layer validates it before dispatching; confirm at the caller.
+ */
+void
+ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_txq_info *qinfo)
+{
+	struct ixgbe_tx_queue *txq;
+
+	txq = dev->data->tx_queues[queue_id];
+
+	qinfo->nb_desc = txq->nb_tx_desc;
+
+	/* Hardware prefetch / host / write-back thresholds. */
+	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+	/* Thresholds and flags captured from tx_conf at queue setup time. */
+	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+	qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+	qinfo->conf.txq_flags = txq->txq_flags;
+	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
/*
* [VF] Initializes Receive Unit.
*/