return 0;
}
-void enic_del_mac_address(struct enic *enic, int mac_index)
+int enic_del_mac_address(struct enic *enic, int mac_index)
{
struct rte_eth_dev *eth_dev = enic->rte_dev;
uint8_t *mac_addr = eth_dev->data->mac_addrs[mac_index].addr_bytes;
- if (vnic_dev_del_addr(enic->vdev, mac_addr))
- dev_err(enic, "del mac addr failed\n");
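+ /*
+  * Defer error reporting to the caller, which now receives the
+  * result instead of a log message.
+  */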
+ return vnic_dev_del_addr(enic->vdev, mac_addr);
}
int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
unsigned int error_interrupt_enable = 1;
unsigned int error_interrupt_offset = 0;
unsigned int rxq_interrupt_enable = 0;
- unsigned int rxq_interrupt_offset;
+ unsigned int rxq_interrupt_offset = ENICPMD_RXQ_INTR_OFFSET;
unsigned int index = 0;
unsigned int cq_idx;
struct vnic_rq *data_rq;
- if (enic->rte_dev->data->dev_conf.intr_conf.rxq) {
+ if (enic->rte_dev->data->dev_conf.intr_conf.rxq)
rxq_interrupt_enable = 1;
- rxq_interrupt_offset = ENICPMD_RXQ_INTR_OFFSET;
- }
+
for (index = 0; index < enic->rq_count; index++) {
cq_idx = enic_cq_rq(enic, enic_rte_rq_idx_to_sop_idx(index));
enic_cq_wq(enic, index),
error_interrupt_enable,
error_interrupt_offset);
+ /* Compute unsupported ol flags for enic_prep_pkts() */
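+ /*
+  * tx_offload_mask is kept a subset of PKT_TX_OFFLOAD_MASK (see the
+  * overlay offload setup below), so the XOR yields exactly the
+  * offload flags the driver does not support.
+  */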
+ enic->wq[index].tx_offload_notsup_mask =
+ PKT_TX_OFFLOAD_MASK ^ enic->tx_offload_mask;
cq_idx = enic_cq_wq(enic, index);
vnic_cq_init(&enic->cq[cq_idx],
struct enic *enic = (struct enic *)priv;
struct enic_memzone_entry *mze;
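+ /*
+  * This memory is DMA-mapped for the adapter, so it must be
+  * IOVA-contiguous; as of DPDK 18.05, RTE_MEMZONE_IOVA_CONTIG has to
+  * be passed to guarantee that.
+  */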
- rz = rte_memzone_reserve_aligned((const char *)name,
- size, SOCKET_ID_ANY, 0, ENIC_ALIGN);
+ rz = rte_memzone_reserve_aligned((const char *)name, size,
+ SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG, ENIC_ALIGN);
if (!rz) {
pr_err("%s : Failed to allocate memory requested for %s\n",
__func__, name);
instance++);
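+ /* The adapter DMA-writes CQ messages here, so this zone must be
+  * IOVA-contiguous as well.
+  */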
wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
- sizeof(uint32_t),
- SOCKET_ID_ANY, 0,
- ENIC_ALIGN);
+ sizeof(uint32_t), SOCKET_ID_ANY,
+ RTE_MEMZONE_IOVA_CONTIG, ENIC_ALIGN);
if (!wq->cqmsg_rz)
return -ENOMEM;
static int enic_dev_open(struct enic *enic)
{
int err;
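+ /*
+  * CMD_OPENF_IG_DESCCACHE requests the ingress descriptor cache;
+  * adapters without the feature presumably ignore the flag.
+  */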
+ int flags = CMD_OPENF_IG_DESCCACHE;
err = enic_dev_wait(enic->vdev, vnic_dev_open,
- vnic_dev_open_done, 0);
+ vnic_dev_open_done, flags);
if (err)
dev_err(enic_get_dev(enic),
"vNIC device open failed, err %d\n", err);
rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV4;
if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+ /*
+ * 'TCP' is not a typo. HW does not have a separate
+ * enable bit for UDP RSS. The TCP bit enables both TCP
+ * and UDP RSS.
+ */
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
+ }
if (rss_hf & ETH_RSS_IPV6)
rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6;
if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+ /* Again, 'TCP' is not a typo. */
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
+ }
if (rss_hf & ETH_RSS_IPV6_EX)
rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6_EX;
if (rss_hf & ETH_RSS_IPV6_TCP_EX)
/* free and reallocate RQs with the new MTU */
for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
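+ /* Skip RQs that are not in use (e.g. not yet set up by the app) */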
+ if (!rq->in_use)
+ continue;
enic_free_rq(rq);
rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp,
/* set up link status checking */
vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
+ enic->overlay_offload = false;
+ if (!enic->disable_overlay && enic->vxlan &&
+ /* 'VXLAN feature' enables VXLAN, NVGRE, and GENEVE. */
+ vnic_dev_overlay_offload_ctrl(enic->vdev,
+ OVERLAY_FEATURE_VXLAN,
+ OVERLAY_OFFLOAD_ENABLE) == 0) {
+ enic->tx_offload_capa |=
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
+ /*
+ * Do not add PKT_TX_OUTER_{IPV4,IPV6} as they are not
+ * 'offload' flags (i.e. not part of PKT_TX_OFFLOAD_MASK).
+ */
+ enic->tx_offload_mask |=
+ PKT_TX_OUTER_IP_CKSUM |
+ PKT_TX_TUNNEL_MASK;
+ enic->overlay_offload = true;
+ dev_info(enic, "Overlay offload is enabled\n");
+ }
+
return 0;
}