{
size_t cqe_mem_size;
- /* allocate continuous cqe memory for saving number of memory zone */
cqe_mem_size = sizeof(struct hinic_rq_cqe) * rxq->q_depth;
rxq->cqe_start_vaddr =
dma_zalloc_coherent(rxq->nic_dev->hwdev,
if (nic_dev->rxqs[q_id] == NULL)
continue;
- hinic_free_all_rx_skbs(nic_dev->rxqs[q_id]);
+ hinic_free_all_rx_mbufs(nic_dev->rxqs[q_id]);
hinic_free_rx_resources(nic_dev->rxqs[q_id]);
kfree(nic_dev->rxqs[q_id]);
nic_dev->rxqs[q_id] = NULL;
u16 q_id;
for (q_id = 0; q_id < nic_dev->num_rq; q_id++)
- hinic_free_all_rx_skbs(nic_dev->rxqs[q_id]);
+ hinic_free_all_rx_mbufs(nic_dev->rxqs[q_id]);
}
static void hinic_recv_jumbo_pkt(struct hinic_rxq *rxq,
- struct rte_mbuf *head_skb,
+ struct rte_mbuf *head_mbuf,
u32 remain_pkt_len)
{
struct hinic_nic_dev *nic_dev = rxq->nic_dev;
cur_mbuf->data_len = (u16)pkt_len;
cur_mbuf->next = NULL;
- head_skb->pkt_len += cur_mbuf->data_len;
- head_skb->nb_segs++;
+ head_mbuf->pkt_len += cur_mbuf->data_len;
+ head_mbuf->nb_segs++;
if (!rxm)
- head_skb->next = cur_mbuf;
+ head_mbuf->next = cur_mbuf;
else
rxm->next = cur_mbuf;
struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
struct rte_eth_rss_conf rss_conf =
dev->data->dev_conf.rx_adv_conf.rss_conf;
- u32 csum_en = 0;
int err;
if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
/* Enable both L3/L4 rx checksum offload */
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CHECKSUM)
- csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
+ nic_dev->rx_csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
- err = hinic_set_rx_csum_offload(nic_dev->hwdev, csum_en);
+ err = hinic_set_rx_csum_offload(nic_dev->hwdev,
+ HINIC_RX_CSUM_OFFLOAD_EN);
if (err)
goto rx_csum_ofl_err;
}
}
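For context, whether the RX path parses checksums at all is decided here by what the application requested at configure time. The sketch below is not part of this patch; it is a minimal application-side illustration using the DEV_RX_OFFLOAD_*/PKT_RX_* flag names of this DPDK release, and the helper names app_enable_rx_csum/app_rx_csum_bad are hypothetical.

#include <string.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Request L3/L4 RX checksum offload before the PMD configure hook runs,
 * so the branch above sets rx_csum_en.
 */
static int app_enable_rx_csum(uint16_t port_id)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;

	/* one RX queue and one TX queue, for brevity */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}

/* After rte_eth_rx_burst() the parsed result is reported per mbuf. */
static inline int app_rx_csum_bad(const struct rte_mbuf *m)
{
	return (m->ol_flags & (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD)) != 0;
}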
-void hinic_free_all_rx_skbs(struct hinic_rxq *rxq)
+void hinic_free_all_rx_mbufs(struct hinic_rxq *rxq)
{
struct hinic_nic_dev *nic_dev = rxq->nic_dev;
struct hinic_rx_info *rx_info;
{
uint32_t checksum_err;
uint64_t flags;
+ struct hinic_nic_dev *nic_dev = rxq->nic_dev;
+
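+ /* checksum parsing was not requested at configure time, report unknown */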
+ if (unlikely(!(nic_dev->rx_csum_en & HINIC_RX_CSUM_OFFLOAD_EN)))
+ return PKT_RX_IP_CKSUM_UNKNOWN;
/* in most cases the checksum is correct */
checksum_err = HINIC_GET_RX_CSUM_ERR(status);
rxm->data_len = rx_buf_len;
rxm->pkt_len = rx_buf_len;
- /* if jumbo use multi-wqebb update ci,
- * recv_jumbo_pkt will also update ci
+ /* if a jumbo packet is received, the CI update is done by
+ * the hinic_recv_jumbo_pkt function.
*/
HINIC_UPDATE_RQ_LOCAL_CI(rxq, wqebb_cnt + 1);
wqebb_cnt = 0;
#include "hinic_pmd_tx.h"
/* packet header and tx offload info */
+#define ETHER_LEN_NO_VLAN 14
+#define ETHER_LEN_WITH_VLAN 18
+#define HEADER_LEN_OFFSET 2
#define VXLANLEN 8
#define MAX_PLD_OFFSET 221
#define MAX_SINGLE_SGE_SIZE 65536
#define HINIC_TSO_PKT_MAX_SGE 127 /* tso max sge 127 */
#define HINIC_TSO_SEG_NUM_INVALID(num) ((num) > HINIC_TSO_PKT_MAX_SGE)
+#define HINIC_TX_OUTER_CHECKSUM_FLAG_SET 1
+#define HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET 0
+
/* sizeof(struct hinic_sq_bufdesc) == 16, shift 4 */
#define HINIC_BUF_DESC_SIZE(nr_descs) (SIZE_8BYTES(((u32)nr_descs) << 4))
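/* Worked example (assuming SIZE_8BYTES() converts a byte count into 8-byte
 * units): HINIC_BUF_DESC_SIZE(3) -> SIZE_8BYTES(3 << 4) -> 48 bytes -> 6.
 */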
hinic_set_l4_csum_info(struct hinic_sq_task *task,
u32 *queue_info, struct hinic_tx_offload_info *poff_info)
{
- u32 tcp_udp_cs, sctp;
+ u32 tcp_udp_cs, sctp = 0;
u16 l2hdr_len;
- sctp = 0;
if (unlikely(poff_info->inner_l4_type == SCTP_OFFLOAD_ENABLE))
sctp = 1;
tcp_udp_cs = poff_info->inner_l4_tcp_udp;
- if (poff_info->tunnel_type == TUNNEL_UDP_NO_CSUM) {
+ if (poff_info->tunnel_type == TUNNEL_UDP_CSUM ||
+ poff_info->tunnel_type == TUNNEL_UDP_NO_CSUM) {
l2hdr_len = poff_info->outer_l2_len;
task->pkt_info2 |=
return (struct hinic_sq_wqe *)WQ_WQE_ADDR(wq, cur_pi);
}
-static inline int
-hinic_validate_tx_offload(const struct rte_mbuf *m)
-{
- uint64_t ol_flags = m->ol_flags;
- uint64_t inner_l3_offset = m->l2_len;
-
- /* just support vxlan offload */
- if ((ol_flags & PKT_TX_TUNNEL_MASK) &&
- !(ol_flags & PKT_TX_TUNNEL_VXLAN))
- return -ENOTSUP;
-
- if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
- inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
-
- /* Headers are fragmented */
- if (rte_pktmbuf_data_len(m) < inner_l3_offset + m->l3_len + m->l4_len)
- return -ENOTSUP;
-
- /* IP checksum can be counted only for IPv4 packet */
- if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6))
- return -EINVAL;
-
- /* IP type not set when required */
- if (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG)) {
- if (!(ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)))
- return -EINVAL;
- }
-
- /* Check requirements for TSO packet */
- if (ol_flags & PKT_TX_TCP_SEG) {
- if (m->tso_segsz == 0 ||
- ((ol_flags & PKT_TX_IPV4) &&
- !(ol_flags & PKT_TX_IP_CKSUM)))
- return -EINVAL;
- }
-
- /* PKT_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. */
- if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
- !(ol_flags & PKT_TX_OUTER_IPV4))
- return -EINVAL;
-
- return 0;
-}
-
static inline uint16_t
hinic_ipv4_phdr_cksum(const struct rte_ipv4_hdr *ipv4_hdr, uint64_t ol_flags)
{
return __rte_raw_cksum_reduce(sum);
}
+static inline void
+hinic_get_pld_offset(struct rte_mbuf *m, struct hinic_tx_offload_info *off_info,
+ int outer_cs_flag)
+{
+ uint64_t ol_flags = m->ol_flags;
+
+ if (outer_cs_flag == HINIC_TX_OUTER_CHECKSUM_FLAG_SET) {
+ if ((ol_flags & PKT_TX_UDP_CKSUM) == PKT_TX_UDP_CKSUM) {
+ off_info->payload_offset = m->outer_l2_len +
+ m->outer_l3_len + m->l2_len + m->l3_len;
+ } else if ((ol_flags & PKT_TX_TCP_CKSUM) ||
+ (ol_flags & PKT_TX_TCP_SEG)) {
+ off_info->payload_offset = m->outer_l2_len +
+ m->outer_l3_len + m->l2_len +
+ m->l3_len + m->l4_len;
+ }
+ } else {
+ if ((ol_flags & PKT_TX_UDP_CKSUM) == PKT_TX_UDP_CKSUM) {
+ off_info->payload_offset = m->l2_len + m->l3_len;
+ } else if ((ol_flags & PKT_TX_TCP_CKSUM) ||
+ (ol_flags & PKT_TX_TCP_SEG)) {
+ off_info->payload_offset = m->l2_len + m->l3_len +
+ m->l4_len;
+ }
+ }
+}
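+ /* Worked example for the function above (illustrative values): a
+ * VXLAN-encapsulated TCP segment with outer_l2_len = 14, outer_l3_len = 20,
+ * l2_len = 30 (outer UDP + VXLAN + inner Ethernet), l3_len = 20 and
+ * l4_len = 20 yields, in the outer-checksum case,
+ * payload_offset = 14 + 20 + 30 + 20 + 20 = 104 bytes.
+ */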
+
+static inline void
+hinic_analyze_tx_info(struct rte_mbuf *mbuf,
+ struct hinic_tx_offload_info *off_info)
+{
+ struct rte_ether_hdr *eth_hdr;
+ struct rte_vlan_hdr *vlan_hdr;
+ struct rte_ipv4_hdr *ip4h;
+ u16 pkt_type;
+ u8 *hdr;
+
+ hdr = rte_pktmbuf_mtod(mbuf, u8 *);
+ eth_hdr = (struct rte_ether_hdr *)hdr;
+ pkt_type = rte_be_to_cpu_16(eth_hdr->ether_type);
+
+ if (pkt_type == RTE_ETHER_TYPE_VLAN) {
+ off_info->outer_l2_len = ETHER_LEN_WITH_VLAN;
+ vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
+ pkt_type = rte_be_to_cpu_16(vlan_hdr->eth_proto);
+ } else {
+ off_info->outer_l2_len = ETHER_LEN_NO_VLAN;
+ }
+
+ if (pkt_type == RTE_ETHER_TYPE_IPV4) {
+ ip4h = (struct rte_ipv4_hdr *)(hdr + off_info->outer_l2_len);
+ off_info->outer_l3_len = (ip4h->version_ihl & 0xf) <<
+ HEADER_LEN_OFFSET;
+ } else if (pkt_type == RTE_ETHER_TYPE_IPV6) {
+ /* IPv6 extension headers are not supported */
+ off_info->outer_l3_len = sizeof(struct rte_ipv6_hdr);
+ }
+}
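+ /* Worked example for the parser above: a VLAN-tagged outer IPv4 header
+ * with version_ihl == 0x45 yields outer_l2_len = ETHER_LEN_WITH_VLAN = 18
+ * and outer_l3_len = (0x45 & 0xf) << HEADER_LEN_OFFSET = 5 << 2 = 20;
+ * an untagged outer IPv6 header yields outer_l2_len = 14 and
+ * outer_l3_len = sizeof(struct rte_ipv6_hdr) = 40.
+ */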
+
static inline int
hinic_tx_offload_pkt_prepare(struct rte_mbuf *m,
struct hinic_tx_offload_info *off_info)
struct rte_ether_hdr *eth_hdr;
struct rte_vlan_hdr *vlan_hdr;
u16 eth_type = 0;
- uint64_t inner_l3_offset = m->l2_len;
+ uint64_t inner_l3_offset;
uint64_t ol_flags = m->ol_flags;
- /* Does packet set any of available offloads */
+ /* Check whether the packet sets any of the supported offload flags */
if (!(ol_flags & HINIC_TX_CKSUM_OFFLOAD_MASK))
return 0;
- if (unlikely(hinic_validate_tx_offload(m)))
+ /* Support only vxlan offload */
+ if ((ol_flags & PKT_TX_TUNNEL_MASK) &&
+ !(ol_flags & PKT_TX_TUNNEL_VXLAN))
+ return -ENOTSUP;
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ if (rte_validate_tx_offload(m) != 0)
return -EINVAL;
+#endif
- if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
- (ol_flags & PKT_TX_OUTER_IPV6) ||
- (ol_flags & PKT_TX_TUNNEL_VXLAN)) {
- inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
- off_info->outer_l2_len = m->outer_l2_len;
- off_info->outer_l3_len = m->outer_l3_len;
- /* just support vxlan tunneling pkt */
- off_info->inner_l2_len = m->l2_len - VXLANLEN -
- sizeof(struct rte_udp_hdr);
- off_info->inner_l3_len = m->l3_len;
- off_info->inner_l4_len = m->l4_len;
- off_info->tunnel_length = m->l2_len;
- off_info->payload_offset = m->outer_l2_len +
- m->outer_l3_len + m->l2_len + m->l3_len;
- off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+ if (ol_flags & PKT_TX_TUNNEL_VXLAN) {
+ if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
+ (ol_flags & PKT_TX_OUTER_IPV6)) {
+ inner_l3_offset = m->l2_len + m->outer_l2_len +
+ m->outer_l3_len;
+ off_info->outer_l2_len = m->outer_l2_len;
+ off_info->outer_l3_len = m->outer_l3_len;
+ /* just support vxlan tunneling pkt */
+ off_info->inner_l2_len = m->l2_len - VXLANLEN -
+ sizeof(*udp_hdr);
+ off_info->inner_l3_len = m->l3_len;
+ off_info->inner_l4_len = m->l4_len;
+ off_info->tunnel_length = m->l2_len;
+ off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+
+ hinic_get_pld_offset(m, off_info,
+ HINIC_TX_OUTER_CHECKSUM_FLAG_SET);
+ } else {
+ inner_l3_offset = m->l2_len;
+ hinic_analyze_tx_info(m, off_info);
+ /* just support vxlan tunneling pkt */
+ off_info->inner_l2_len = m->l2_len - VXLANLEN -
+ sizeof(*udp_hdr) - off_info->outer_l2_len -
+ off_info->outer_l3_len;
+ off_info->inner_l3_len = m->l3_len;
+ off_info->inner_l4_len = m->l4_len;
+ off_info->tunnel_length = m->l2_len -
+ off_info->outer_l2_len - off_info->outer_l3_len;
+ off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+
+ hinic_get_pld_offset(m, off_info,
+ HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET);
+ }
} else {
+ inner_l3_offset = m->l2_len;
off_info->inner_l2_len = m->l2_len;
off_info->inner_l3_len = m->l3_len;
off_info->inner_l4_len = m->l4_len;
off_info->tunnel_type = NOT_TUNNEL;
- off_info->payload_offset = m->l2_len + m->l3_len;
- }
- if (((ol_flags & PKT_TX_L4_MASK) != PKT_TX_SCTP_CKSUM) &&
- ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_UDP_CKSUM))
- off_info->payload_offset += m->l4_len;
+ hinic_get_pld_offset(m, off_info,
+ HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET);
+ }
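+ /* Worked numbers for the non-outer-checksum vxlan branch above, assuming
+ * the application sets l2_len to cover everything up to the inner L3
+ * header: outer Ethernet 14 + outer IPv4 20 + UDP 8 + VXLAN 8 + inner
+ * Ethernet 14 gives l2_len = 64, so with the parsed outer_l2_len = 14 and
+ * outer_l3_len = 20 the driver derives
+ * inner_l2_len = 64 - 8 - 8 - 14 - 20 = 14 and
+ * tunnel_length = 64 - 14 - 20 = 30.
+ */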
/* invalid udp or tcp header */
if (unlikely(off_info->payload_offset > MAX_PLD_OFFSET))
if ((ol_flags & PKT_TX_TUNNEL_VXLAN) && ((ol_flags & PKT_TX_TCP_SEG) ||
(ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
(ol_flags & PKT_TX_OUTER_IPV6))) {
- off_info->tunnel_type = TUNNEL_UDP_CSUM;
/* inner_l4_tcp_udp csum should be set to calculate the outer
* udp checksum when vxlan packets have no inner l3 and l4
udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
m->outer_l3_len);
- udp_hdr->dgram_cksum =
- hinic_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
+ udp_hdr->dgram_cksum = 0;
} else if (eth_type == RTE_ETHER_TYPE_IPV6) {
off_info->outer_l3_type = IPV6_PKT;
ipv6_hdr =
rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
(m->outer_l2_len +
m->outer_l3_len));
- udp_hdr->dgram_cksum =
- hinic_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
+ udp_hdr->dgram_cksum = 0;
}
+ } else if (ol_flags & PKT_TX_OUTER_IPV4) {
+ off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+ off_info->inner_l4_tcp_udp = 1;
+ off_info->outer_l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
}
if (ol_flags & PKT_TX_IPV4)
off_info->inner_l4_type = UDP_OFFLOAD_ENABLE;
off_info->inner_l4_tcp_udp = 1;
- off_info->inner_l4_len = sizeof(struct rte_udp_hdr);
} else if (((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) ||
(ol_flags & PKT_TX_TCP_SEG)) {
if (ol_flags & PKT_TX_IPV4) {
return nb_tx;
}
-void hinic_free_all_tx_skbs(struct hinic_txq *txq)
+void hinic_free_all_tx_mbufs(struct hinic_txq *txq)
{
u16 ci;
struct hinic_nic_dev *nic_dev = txq->nic_dev;
continue;
/* stop tx queue and free tx mbufs */
- hinic_free_all_tx_skbs(nic_dev->txqs[q_id]);
+ hinic_free_all_tx_mbufs(nic_dev->txqs[q_id]);
hinic_free_tx_resources(nic_dev->txqs[q_id]);
/* free txq */
for (q_id = 0; q_id < nic_dev->num_sq; q_id++)
/* stop tx queue and free tx mbufs */
- hinic_free_all_tx_skbs(nic_dev->txqs[q_id]);
+ hinic_free_all_tx_mbufs(nic_dev->txqs[q_id]);
}
int hinic_setup_tx_resources(struct hinic_txq *txq)