#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */
#define E1000_TX_OFFLOAD_MASK ( \
+ PKT_TX_IPV6 | \
+ PKT_TX_IPV4 | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_VLAN_PKT)
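For context: the prep path further down rejects any packet whose ol_flags fall outside this mask, via E1000_TX_OFFLOAD_NOTSUP_MASK. A sketch of the usual DPDK driver idiom for deriving that companion mask (illustrative, assuming the pre-21.11 PKT_TX_* flag names used throughout this patch):

/* every PKT_TX_* bit this driver does not handle; a packet carrying
 * any of these bits is rejected in the tx_prepare() callback below */
#define E1000_TX_OFFLOAD_NOTSUP_MASK \
	(PKT_TX_OFFLOAD_MASK ^ E1000_TX_OFFLOAD_MASK)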
/* setup IPCS* fields */
ctx.lower_setup.ip_fields.ipcss = (uint8_t)l2len;
ctx.lower_setup.ip_fields.ipcso = (uint8_t)(l2len +
- offsetof(struct ipv4_hdr, hdr_checksum));
+ offsetof(struct rte_ipv4_hdr, hdr_checksum));
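A worked example of the offsets programmed here (illustrative, assuming an untagged Ethernet frame): l2len is 14 and hdr_checksum lies 10 bytes into struct rte_ipv4_hdr, so the context descriptor is written with ipcss = 14 (start of the IP header) and ipcso = 24 (where the hardware inserts the IPv4 checksum).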
/*
* When doing checksum or TCP segmentation with IPv6 headers,
switch (flags & PKT_TX_L4_MASK) {
case PKT_TX_UDP_CKSUM:
ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse +
- offsetof(struct udp_hdr, dgram_cksum));
+ offsetof(struct rte_udp_hdr, dgram_cksum));
cmp_mask |= TX_MACIP_LEN_CMP_MASK;
break;
case PKT_TX_TCP_CKSUM:
ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse +
- offsetof(struct tcp_hdr, cksum));
+ offsetof(struct rte_tcp_hdr, cksum));
cmd_len |= E1000_TXD_CMD_TCP;
cmp_mask |= TX_MACIP_LEN_CMP_MASK;
break;
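Similarly for the L4 offsets, assuming ipcse here is the L4 start offset (l2len + l3len): for an untagged IPv4/TCP packet that is 14 + 20 = 34, and cksum sits 16 bytes into struct rte_tcp_hdr, so tucso is programmed to 50; for UDP, dgram_cksum sits 6 bytes into struct rte_udp_hdr, giving 40.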
m = tx_pkts[i];
if (m->ol_flags & E1000_TX_OFFLOAD_NOTSUP_MASK) {
- rte_errno = -ENOTSUP;
+ rte_errno = ENOTSUP;
return i;
}
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
ret = rte_validate_tx_offload(m);
if (ret != 0) {
- rte_errno = ret;
+ rte_errno = -ret;
return i;
}
#endif
ret = rte_net_intel_cksum_prepare(m);
if (ret != 0) {
- rte_errno = ret;
+ rte_errno = -ret;
return i;
}
}
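The sign flips above follow the rte_errno convention: on failure it holds a positive errno value. A minimal caller-side sketch (illustrative only; port_id, queue_id, pkts and nb_pkts are assumed to be defined by the application):

uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
if (nb_prep < nb_pkts)
	/* pkts[nb_prep] was rejected; rte_errno is positive, so it can
	 * be passed straight to rte_strerror() */
	printf("tx_prepare stopped at %u: %s\n",
	       (unsigned int)nb_prep, rte_strerror(rte_errno));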
*/
rxm->next = NULL;
if (unlikely(rxq->crc_len > 0)) {
- first_seg->pkt_len -= ETHER_CRC_LEN;
- if (data_len <= ETHER_CRC_LEN) {
+ first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
+ if (data_len <= RTE_ETHER_CRC_LEN) {
rte_pktmbuf_free_seg(rxm);
first_seg->nb_segs--;
last_seg->data_len = (uint16_t)
(last_seg->data_len -
- (ETHER_CRC_LEN - data_len));
+ (RTE_ETHER_CRC_LEN - data_len));
last_seg->next = NULL;
} else
- rxm->data_len =
- (uint16_t) (data_len - ETHER_CRC_LEN);
+ rxm->data_len = (uint16_t)
+ (data_len - RTE_ETHER_CRC_LEN);
}
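To make the boundary case above concrete: with a 4-byte CRC kept by hardware, a final segment with data_len = 2 contains nothing but CRC bytes, so it is freed and the remaining 2 CRC bytes (RTE_ETHER_CRC_LEN - data_len) are trimmed from the previous segment; if instead the final segment held, say, 60 bytes, it simply shrinks to 56.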
/*
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_SCATTER;
- if (max_rx_pktlen > ETHER_MAX_LEN)
+ if (max_rx_pktlen > RTE_ETHER_MAX_LEN)
rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
return rx_offload_capa;
}
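For reference, RTE_ETHER_MAX_LEN is 1518 bytes (14-byte Ethernet header + 1500-byte MTU + 4-byte CRC), so DEV_RX_OFFLOAD_JUMBO_FRAME is only advertised when the maximum RX packet length exceeds a standard Ethernet frame.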
/*
- * EM devices don't support drop_en functionality
+ * EM devices don't support drop_en functionality.
+ * It's an optimization that does nothing on single-queue devices,
+ * so just log the issue and carry on.
*/
if (rx_conf->rx_drop_en) {
- PMD_INIT_LOG(ERR, "drop_en functionality not supported by "
+ PMD_INIT_LOG(NOTICE, "drop_en functionality not supported by "
"device");
- return -EINVAL;
}
/* Free memory prior to re-allocation if needed. */
rxq->queue_id = queue_idx;
rxq->port_id = dev->data->port_id;
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
- rxq->crc_len = ETHER_CRC_LEN;
+ rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
* call to configure
*/
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
- rxq->crc_len = ETHER_CRC_LEN;
+ rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
* one buffer.
*/
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ||
- rctl_bsize < ETHER_MAX_LEN) {
+ rctl_bsize < RTE_ETHER_MAX_LEN) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->rx_pkt_burst =
tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
+ /* SPT and CNP Si errata workaround to avoid data corruption */
+ if (hw->mac.type == e1000_pch_spt) {
+ uint32_t reg_val;
+ reg_val = E1000_READ_REG(hw, E1000_IOSFPC);
+ reg_val |= E1000_RCTL_RDMTS_HEX;
+ E1000_WRITE_REG(hw, E1000_IOSFPC, reg_val);
+
+ /* Dropping the number of outstanding requests from
+ * 3 to 2 in order to avoid a buffer overrun.
+ */
+ reg_val = E1000_READ_REG(hw, E1000_TARC(0));
+ reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ;
+ reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ;
+ E1000_WRITE_REG(hw, E1000_TARC(0), reg_val);
+ }
+
/* This write will effectively turn on the transmit unit. */
E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}