case PKT_TX_UDP_CKSUM:
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
- mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx |= sizeof(struct rte_udp_hdr)
+ << IXGBE_ADVTXD_L4LEN_SHIFT;
tx_offload_mask.l2_len |= ~0;
tx_offload_mask.l3_len |= ~0;
break;
case PKT_TX_TCP_CKSUM:
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
- mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx |= sizeof(struct rte_tcp_hdr)
+ << IXGBE_ADVTXD_L4LEN_SHIFT;
tx_offload_mask.l2_len |= ~0;
tx_offload_mask.l3_len |= ~0;
break;
case PKT_TX_SCTP_CKSUM:
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
- mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx |= sizeof(struct rte_sctp_hdr)
+ << IXGBE_ADVTXD_L4LEN_SHIFT;
tx_offload_mask.l2_len |= ~0;
tx_offload_mask.l3_len |= ~0;
break;
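/*
 * Illustration sketch, not driver code: the sizeof operands above use the
 * rte_-prefixed L4 header types from librte_net, each of which comes from
 * its own header. A minimal sketch of where they live and the lengths the
 * driver shifts into the L4LEN field of the context descriptor:
 */
#include <stdio.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>

static void
print_l4_hdr_lens(void)
{
	/* 8, 20 and 12 bytes respectively. */
	printf("udp=%zu tcp=%zu sctp=%zu\n",
	       sizeof(struct rte_udp_hdr),
	       sizeof(struct rte_tcp_hdr),
	       sizeof(struct rte_sctp_hdr));
}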
*/
if (m->nb_segs > IXGBE_TX_MAX_SEG - txq->wthresh) {
- rte_errno = -EINVAL;
+ rte_errno = EINVAL;
return i;
}
if (ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK) {
- rte_errno = -ENOTSUP;
+ rte_errno = ENOTSUP;
return i;
}
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
ret = rte_validate_tx_offload(m);
if (ret != 0) {
- rte_errno = ret;
+ rte_errno = -ret;
return i;
}
#endif
ret = rte_net_intel_cksum_prepare(m);
if (ret != 0) {
- rte_errno = ret;
+ rte_errno = -ret;
return i;
}
}
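/*
 * Sketch of the errno convention used above, for illustration only:
 * rte_errno stores positive errno values, while helpers such as
 * rte_validate_tx_offload() and rte_net_intel_cksum_prepare() return
 * negative errno on failure, hence the sign handling. The validate() and
 * prep_one() helpers below are hypothetical.
 */
#include <errno.h>
#include <stdint.h>
#include <rte_errno.h>

static int
validate(int ok)
{
	return ok ? 0 : -EINVAL;	/* 0 on success, negative errno on error */
}

static uint16_t
prep_one(int ok, uint16_t i)
{
	int ret = validate(ok);

	if (ret != 0) {
		rte_errno = -ret;	/* store the positive errno value */
		return i;
	}
	return i + 1;
}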
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_SCATTER;
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_RSS_HASH;
if (hw->mac.type == ixgbe_mac_82598EB)
offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
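/*
 * Sketch of how an application consumes the capability reported above
 * (assumes DPDK 19.11+, where rte_eth_dev_info_get() returns an error
 * code). The function name and parameters are illustrative only.
 */
#include <stdint.h>
#include <rte_ethdev.h>

static int
enable_rss_hash_if_supported(uint16_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info info;
	int ret = rte_eth_dev_info_get(port_id, &info);

	if (ret != 0)
		return ret;
	/* Only request the per-packet RSS hash in the mbuf when the PMD
	 * advertises DEV_RX_OFFLOAD_RSS_HASH. */
	if (info.rx_offload_capa & DEV_RX_OFFLOAD_RSS_HASH)
		conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
	return 0;
}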
struct ixgbe_rx_queue *rxq;
struct ixgbe_hw *hw;
uint16_t len;
- struct ixgbe_adapter *adapter =
- (struct ixgbe_adapter *)dev->data->dev_private;
+ struct ixgbe_adapter *adapter = dev->data->dev_private;
uint64_t offloads;
PMD_INIT_FUNC_TRACE();
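/*
 * Sketch of the cast-removal pattern above, for illustration only:
 * dev_private is a void pointer, and C converts void * to any object
 * pointer type implicitly, so the (struct ixgbe_adapter *) cast is
 * redundant. The adapter_from_priv() helper below is hypothetical.
 */
struct ixgbe_adapter;

static struct ixgbe_adapter *
adapter_from_priv(void *dev_private)
{
	return dev_private;	/* implicit conversion from void * */
}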
ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
unsigned i;
- struct ixgbe_adapter *adapter =
- (struct ixgbe_adapter *)dev->data->dev_private;
+ struct ixgbe_adapter *adapter = dev->data->dev_private;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
uint32_t reta_reg;
PMD_INIT_FUNC_TRACE();
- adapter = (struct ixgbe_adapter *)dev->data->dev_private;
+ adapter = dev->data->dev_private;
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
/* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */
uint16_t maxdesc =
- IPV4_MAX_PKT_LEN /
+ RTE_IPV4_MAX_PKT_LEN /
(mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
if (maxdesc >= 16)
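/*
 * Worked example of the maxdesc computation above, for illustration only:
 * RTE_IPV4_MAX_PKT_LEN is 65535, so with the default 2 KB mbuf data room
 * (RTE_MBUF_DEFAULT_BUF_SIZE minus RTE_PKTMBUF_HEADROOM) the result is
 * 65535 / 2048 = 31, which satisfies the >= 16 check.
 */
#include <stdio.h>
#include <stdint.h>
#include <rte_ip.h>
#include <rte_mbuf.h>

static void
print_maxdesc_example(void)
{
	uint16_t data_room = RTE_MBUF_DEFAULT_BUF_SIZE;	/* headroom + 2048 */
	uint16_t maxdesc = RTE_IPV4_MAX_PKT_LEN /
			   (data_room - RTE_PKTMBUF_HEADROOM);

	printf("maxdesc=%u\n", maxdesc);	/* 31 */
}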
ixgbe_set_rx_function(struct rte_eth_dev *dev)
{
uint16_t i, rx_using_sse;
- struct ixgbe_adapter *adapter =
- (struct ixgbe_adapter *)dev->data->dev_private;
+ struct ixgbe_adapter *adapter = dev->data->dev_private;
/*
* In order to allow Vector Rx there are a few configuration
ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct ixgbe_hw *hw;
- struct ixgbe_adapter *adapter =
- (struct ixgbe_adapter *)dev->data->dev_private;
+ struct ixgbe_adapter *adapter = dev->data->dev_private;
struct ixgbe_rx_queue *rxq;
uint32_t rxdctl;
int poll_ms;