uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_mask;
uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
+uint64_t rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
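Side note on this new mask: each rte_net_ice_dynflag_*_mask is a single mbuf dynamic-flag bit obtained from the dynflag registry at device init. A minimal sketch of how such a mask is typically derived via the public rte_mbuf_dynflag API; the flag name below is illustrative, not the one the PMD actually registers:

    #include <rte_mbuf_dyn.h>

    /* Minimal sketch: derive a one-bit ol_flags mask from a freshly
     * registered dynamic flag, or 0 on failure. The name here is a
     * stand-in, not the PMD's registered name. */
    static uint64_t
    register_xtr_dynflag(void)
    {
        static const struct rte_mbuf_dynflag param = {
            .name = "example_proto_xtr_ip_offset",
        };
        int offset = rte_mbuf_dynflag_register(&param);

        return offset < 0 ? 0ULL : 1ULL << offset;
    }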
static inline uint64_t
-ice_rxdid_to_proto_xtr_ol_flag(uint8_t rxdid)
-{
- static uint64_t *ol_flag_map[] = {
- [ICE_RXDID_COMMS_AUX_VLAN] =
- &rte_net_ice_dynflag_proto_xtr_vlan_mask,
- [ICE_RXDID_COMMS_AUX_IPV4] =
- &rte_net_ice_dynflag_proto_xtr_ipv4_mask,
- [ICE_RXDID_COMMS_AUX_IPV6] =
- &rte_net_ice_dynflag_proto_xtr_ipv6_mask,
- [ICE_RXDID_COMMS_AUX_IPV6_FLOW] =
- &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask,
- [ICE_RXDID_COMMS_AUX_TCP] =
- &rte_net_ice_dynflag_proto_xtr_tcp_mask,
+ice_rxdid_to_proto_xtr_ol_flag(uint8_t rxdid, bool *chk_valid)
+{
+ static struct {
+ uint64_t *ol_flag;
+ bool chk_valid;
+ } ol_flag_map[] = {
+ [ICE_RXDID_COMMS_AUX_VLAN] = {
+ &rte_net_ice_dynflag_proto_xtr_vlan_mask, true },
+ [ICE_RXDID_COMMS_AUX_IPV4] = {
+ &rte_net_ice_dynflag_proto_xtr_ipv4_mask, true },
+ [ICE_RXDID_COMMS_AUX_IPV6] = {
+ &rte_net_ice_dynflag_proto_xtr_ipv6_mask, true },
+ [ICE_RXDID_COMMS_AUX_IPV6_FLOW] = {
+ &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask, true },
+ [ICE_RXDID_COMMS_AUX_TCP] = {
+ &rte_net_ice_dynflag_proto_xtr_tcp_mask, true },
+ [ICE_RXDID_COMMS_AUX_IP_OFFSET] = {
+ &rte_net_ice_dynflag_proto_xtr_ip_offset_mask, false },
};
uint64_t *ol_flag;
-	ol_flag = rxdid < RTE_DIM(ol_flag_map) ? ol_flag_map[rxdid] : NULL;
-
-	return ol_flag != NULL ? *ol_flag : 0ULL;
+	if (rxdid < RTE_DIM(ol_flag_map)) {
+		ol_flag = ol_flag_map[rxdid].ol_flag;
+		if (!ol_flag)
+			return 0ULL;
+
+		*chk_valid = ol_flag_map[rxdid].chk_valid;
+		return *ol_flag;
+	}
+
+	return 0ULL;
}
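The array-of-pointers map becomes an array of { mask pointer, chk_valid } pairs so the new IP_OFFSET RXDID can opt out of the validity-bit check. One subtlety worth spelling out: designated initializers zero-fill the unnamed slots, which is what makes the !ol_flag guard above safe for RXDID values between the listed ones. A standalone demo of that zero-filling (hypothetical table, not driver code):

    #include <stdio.h>

    static const struct {
        const char *tag;
        int valid;
    } demo_map[8] = {
        [2] = { "two", 1 },
        [5] = { "five", 1 },
    };

    int main(void)
    {
        int i;

        /* Holes read back as { NULL, 0 }, mirroring the NULL ol_flag
         * entries the function above must guard against. */
        for (i = 0; i < 8; i++)
            printf("%d: %s valid=%d\n", i,
                   demo_map[i].tag ? demo_map[i].tag : "(hole)",
                   demo_map[i].valid);
        return 0;
    }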
static inline uint8_t
[PROTO_XTR_IPV6] = ICE_RXDID_COMMS_AUX_IPV6,
[PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW,
[PROTO_XTR_TCP] = ICE_RXDID_COMMS_AUX_TCP,
+ [PROTO_XTR_IP_OFFSET] = ICE_RXDID_COMMS_AUX_IP_OFFSET,
};
return xtr_type < RTE_DIM(rxdid_map) ?
int err;
struct ice_vsi *vsi;
struct ice_hw *hw;
- struct ice_aqc_add_tx_qgrp txq_elem;
+ struct ice_aqc_add_tx_qgrp *txq_elem;
struct ice_tlan_ctx tx_ctx;
+ int buf_len;
PMD_INIT_FUNC_TRACE();
return -EINVAL;
}
+ buf_len = ice_struct_size(txq_elem, txqs, 1);
+ txq_elem = ice_malloc(hw, buf_len);
+ if (!txq_elem)
+ return -ENOMEM;
+
vsi = txq->vsi;
hw = ICE_VSI_TO_HW(vsi);
- memset(&txq_elem, 0, sizeof(txq_elem));
memset(&tx_ctx, 0, sizeof(tx_ctx));
- txq_elem.num_txqs = 1;
- txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
+ txq_elem->num_txqs = 1;
+ txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
tx_ctx.qlen = txq->nb_tx_desc;
tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
- ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
+ ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
ice_tlan_ctx_info);
txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
/* Fix me, we assume TC always 0 here */
err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
- &txq_elem, sizeof(txq_elem), NULL);
+ txq_elem, buf_len, NULL);
if (err) {
PMD_DRV_LOG(ERR, "Failed to add lan txq");
+ rte_free(txq_elem);
return -EIO;
}
/* store the schedule node id */
- txq->q_teid = txq_elem.txqs[0].q_teid;
+ txq->q_teid = txq_elem->txqs[0].q_teid;
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ rte_free(txq_elem);
return 0;
}
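The move from a stack txq_elem to ice_malloc() tracks the base-code change that gave struct ice_aqc_add_tx_qgrp a flexible array of per-queue elements; sizeof() no longer describes a usable buffer, so the size must come from ice_struct_size(). As I read the PMD's osdep layer, ice_malloc() wraps rte_zmalloc() (hence the dropped memset, and rte_free() on every exit path) and ignores its hw argument, which is why calling it before hw is assigned is harmless, if odd-looking. A sketch of the sizing idiom with hypothetical types:

    #include <stdlib.h>

    struct tx_qgrp {
        unsigned char num_txqs;
        struct { unsigned short txq_id; } txqs[]; /* flexible array */
    };

    static struct tx_qgrp *
    tx_qgrp_alloc(size_t n)
    {
        /* ice_struct_size(p, txqs, n) boils down to this: the fixed
         * header plus n trailing elements, zeroed like rte_zmalloc(). */
        struct tx_qgrp *p = calloc(1, sizeof(*p) + n * sizeof(p->txqs[0]));

        if (p)
            p->num_txqs = (unsigned char)n;
        return p;
    }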
int err;
struct ice_vsi *vsi;
struct ice_hw *hw;
- struct ice_aqc_add_tx_qgrp txq_elem;
+ struct ice_aqc_add_tx_qgrp *txq_elem;
struct ice_tlan_ctx tx_ctx;
+ int buf_len;
PMD_INIT_FUNC_TRACE();
return -EINVAL;
}
+ buf_len = ice_struct_size(txq_elem, txqs, 1);
+ txq_elem = ice_malloc(hw, buf_len);
+ if (!txq_elem)
+ return -ENOMEM;
+
vsi = txq->vsi;
hw = ICE_VSI_TO_HW(vsi);
- memset(&txq_elem, 0, sizeof(txq_elem));
memset(&tx_ctx, 0, sizeof(tx_ctx));
- txq_elem.num_txqs = 1;
- txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
+ txq_elem->num_txqs = 1;
+ txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
tx_ctx.qlen = txq->nb_tx_desc;
tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
- ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
+ ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
ice_tlan_ctx_info);
txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
/* Fix me, we assume TC always 0 here */
err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
- &txq_elem, sizeof(txq_elem), NULL);
+ txq_elem, buf_len, NULL);
if (err) {
PMD_DRV_LOG(ERR, "Failed to add FDIR txq");
+ rte_free(txq_elem);
return -EIO;
}
/* store the schedule node id */
- txq->q_teid = txq_elem.txqs[0].q_teid;
+ txq->q_teid = txq_elem->txqs[0].q_teid;
+ rte_free(txq_elem);
return 0;
}
volatile struct ice_32b_rx_flex_desc_comms *desc)
{
uint16_t stat_err = rte_le_to_cpu_16(desc->status_error1);
- uint32_t metadata;
+ uint32_t metadata = 0;
uint64_t ol_flag;
+ bool chk_valid;
-	if (unlikely(!(stat_err & ICE_RX_PROTO_XTR_VALID)))
-		return;
-
-	ol_flag = ice_rxdid_to_proto_xtr_ol_flag(desc->rxdid);
+	ol_flag = ice_rxdid_to_proto_xtr_ol_flag(desc->rxdid, &chk_valid);
	if (unlikely(!ol_flag))
		return;
-	mb->ol_flags |= ol_flag;
-
-	metadata = stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S) ?
-				rte_le_to_cpu_16(desc->flex_ts.flex.aux0) : 0;
-
-	if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S)))
-		metadata |= rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
+	if (chk_valid) {
+		if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
+			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
+
+		if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
+			metadata |=
+				rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
+	} else {
+		if (rte_le_to_cpu_16(desc->flex_ts.flex.aux0) != 0xFFFF)
+			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
+		else if (rte_le_to_cpu_16(desc->flex_ts.flex.aux1) != 0xFFFF)
+			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
+	}
+
+	if (!metadata)
+		return;
+
+	mb->ol_flags |= ol_flag;
*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
}
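The two branches produce differently shaped metadata. With chk_valid set, aux0 and aux1 are independent 16-bit fields packed into one 32-bit word (aux0 low, aux1 high), each gated by its own XTRMD valid bit; for IP offset the descriptor carries no valid bits, so 0xFFFF acts as a "no data" sentinel and whichever aux word is meaningful is taken whole. A small sketch of unpacking the packed (chk_valid) form:

    #include <stdint.h>

    /* Split a 32-bit proto_xtr metadata word built by the chk_valid
     * branch back into its two 16-bit aux fields. */
    static void
    unpack_xtr_metadata(uint32_t md, uint16_t *aux0, uint16_t *aux1)
    {
        *aux0 = (uint16_t)(md & 0xFFFF); /* low word: aux0 */
        *aux1 = (uint16_t)(md >> 16);    /* high word: aux1 */
    }

One behavioral nuance of the reordered tail: ol_flags is now set only when metadata is non-zero, so an all-zero extraction no longer marks the mbuf.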
switch (ol_flags & PKT_TX_L4_MASK) {
case PKT_TX_TCP_CKSUM:
*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
- *td_offset |= (tx_offload.l4_len >> 2) <<
+ *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
ICE_TX_DESC_LEN_L4_LEN_S;
break;
case PKT_TX_SCTP_CKSUM:
return count;
}
-/* Calculate TCP header length for PKT_TX_TCP_CKSUM if not provided */
-static inline uint16_t
-ice_calc_pkt_tcp_hdr(struct rte_mbuf *tx_pkt, union ice_tx_offload tx_offload)
-{
- uint16_t tcpoff = tx_offload.l2_len + tx_offload.l3_len;
- const struct rte_tcp_hdr *tcp_hdr;
- struct rte_tcp_hdr _tcp_hdr;
-
- if (tcpoff + sizeof(struct rte_tcp_hdr) < tx_pkt->data_len) {
- tcp_hdr = rte_pktmbuf_mtod_offset(tx_pkt, struct rte_tcp_hdr *,
- tcpoff);
-
- return (tcp_hdr->data_off & 0xf0) >> 2;
- }
-
- tcp_hdr = rte_pktmbuf_read(tx_pkt, tcpoff, sizeof(_tcp_hdr), &_tcp_hdr);
- if (tcp_hdr)
- return (tcp_hdr->data_off & 0xf0) >> 2;
- else
- return 0;
-}
-
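For the record, the deleted helper recovered the real header length from TCP's data_off field, whose upper nibble counts 32-bit words; this change goes back to charging the fixed 20-byte sizeof(struct rte_tcp_hdr) in td_offset for plain PKT_TX_TCP_CKSUM. The arithmetic the helper used, restated standalone:

    #include <stdint.h>

    /* Upper nibble of data_off = TCP header length in 32-bit words:
     * bytes = ((data_off & 0xf0) >> 4) * 4 == (data_off & 0xf0) >> 2. */
    static uint16_t
    tcp_hdr_len_bytes(uint8_t data_off)
    {
        return (uint16_t)((data_off & 0xf0) >> 2);
    }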
uint16_t
ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
tx_pkt = *tx_pkts++;
td_cmd = 0;
+ td_tag = 0;
+ td_offset = 0;
ol_flags = tx_pkt->ol_flags;
tx_offload.l2_len = tx_pkt->l2_len;
tx_offload.l3_len = tx_pkt->l3_len;
&cd_tunneling_params);
/* Enable checksum offloading */
- if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK) {
- if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM &&
- !tx_offload.l4_len)
- tx_offload.l4_len =
- ice_calc_pkt_tcp_hdr(tx_pkt, tx_offload);
-
+ if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK)
ice_txd_enable_checksum(ol_flags, &td_cmd,
&td_offset, tx_offload);
- }
if (nb_ctx) {
/* Setup TX context descriptor if required */