RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
+ RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};
#undef RTE_TX_OFFLOAD_BIT2STR
}
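
For context, this table backs rte_eth_dev_tx_offload_name(); a minimal sketch of querying the new bit, assuming DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP is the flag added together with this entry:

#include <stdio.h>
#include <rte_ethdev.h>

/* Query the name string registered in the table above.
 * DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP is assumed to come from this series. */
static void
print_new_offload_name(void)
{
	printf("%s\n",
	       rte_eth_dev_tx_offload_name(DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP));
}

rte_eth_dev_tx_offload_name() scans this table and falls back to "UNKNOWN" for bits without an entry, which is why every new offload flag needs a matching BIT2STR line here.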
mbp_buf_size = rte_pktmbuf_data_room_size(mp);
- if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
+ if (mbp_buf_size < dev_info.min_rx_bufsize + RTE_PKTMBUF_HEADROOM) {
RTE_ETHDEV_LOG(ERR,
"%s mbuf_data_room_size %d < %d (RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)=%d)\n",
mp->name, (int)mbp_buf_size,
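
The rewritten check avoids unsigned underflow when the pool's data room is smaller than RTE_PKTMBUF_HEADROOM. A pool sized to satisfy it can be built as in this sketch (pool name, mbuf count, and cache size are illustrative; error handling is elided):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Size an Rx mempool so the check above passes: the data room must hold
 * RTE_PKTMBUF_HEADROOM plus the device's minimum Rx buffer size. */
static struct rte_mempool *
make_rx_pool(uint16_t port_id, unsigned int nb_mbuf, int socket_id)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return NULL;

	return rte_pktmbuf_pool_create("rx_pool", nb_mbuf, 256, 0,
			RTE_PKTMBUF_HEADROOM + dev_info.min_rx_bufsize,
			socket_id);
}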
int
rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
{
+ struct rte_eth_dev_info dev_info;
struct rte_eth_dev *dev;
int ret = 0;
int mask = 0;
int cur, org = 0;
uint64_t orig_offloads;
uint64_t dev_offloads;
+ uint64_t new_offloads;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
if (mask == 0)
return ret;
+ ret = rte_eth_dev_info_get(port_id, &dev_info);
+ if (ret != 0)
+ return ret;
+
+ /* Rx VLAN offloads must be within the device's capabilities */
+ if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
+ new_offloads = dev_offloads & ~orig_offloads;
+ RTE_ETHDEV_LOG(ERR,
+ "Ethdev port_id=%u requested new added VLAN offloads "
+ "0x%" PRIx64 " must be within Rx offloads capabilities "
+ "0x%" PRIx64 " in %s()\n",
+ port_id, new_offloads, dev_info.rx_offload_capa,
+ __func__);
+ return -EINVAL;
+ }
+
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
dev->data->dev_conf.rxmode.offloads = dev_offloads;
ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
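
With the added check, offload bits outside dev_info.rx_offload_capa are now rejected with -EINVAL before reaching the PMD. A usage sketch with the VLAN mask names this file already uses:

#include <rte_ethdev.h>

/* Request VLAN strip + filter on a port; returns 0 or a negative errno.
 * With this patch, unsupported offloads fail here with -EINVAL instead
 * of being silently handed down to the driver. */
static int
enable_vlan_offloads(uint16_t port_id)
{
	return rte_eth_dev_set_vlan_offload(port_id,
			ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD);
}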
return fd;
}
+static inline int
+eth_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
+ const char *ring_name)
+{
+ return snprintf(name, len, "eth_p%d_q%d_%s",
+ port_id, queue_id, ring_name);
+}
+
const struct rte_memzone *
rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
uint16_t queue_id, size_t size, unsigned align,
const struct rte_memzone *mz;
int rc;
- rc = snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
- dev->data->port_id, queue_id, ring_name);
+ rc = eth_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
+ queue_id, ring_name);
if (rc >= RTE_MEMZONE_NAMESIZE) {
RTE_ETHDEV_LOG(ERR, "ring name too long\n");
rte_errno = ENAMETOOLONG;
RTE_MEMZONE_IOVA_CONTIG, align);
}
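
For reference, a hypothetical PMD setup path that reserves a per-queue descriptor ring through this function (the ring name "rx_ring", 128-byte alignment, and the size computation are illustrative, not taken from any real driver):

#include <rte_ethdev_driver.h>

/* Hypothetical queue-setup helper: reserve a DMA memzone for one queue's
 * descriptor ring. The name passed here must be reused at free time. */
static const struct rte_memzone *
my_rx_ring_reserve(struct rte_eth_dev *dev, uint16_t qid,
		   uint16_t nb_desc, size_t desc_sz, int socket_id)
{
	return rte_eth_dma_zone_reserve(dev, "rx_ring", qid,
					(size_t)nb_desc * desc_sz,
					128, socket_id);
}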
+int
+rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
+ uint16_t queue_id)
+{
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+ int rc = 0;
+
+ rc = eth_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
+ queue_id, ring_name);
+ if (rc >= RTE_MEMZONE_NAMESIZE) {
+ RTE_ETHDEV_LOG(ERR, "ring name too long\n");
+ return -ENAMETOOLONG;
+ }
+
+ mz = rte_memzone_lookup(z_name);
+ if (mz)
+ rc = rte_memzone_free(mz);
+ else
+ rc = -ENOENT;
+
+ return rc;
+}
+
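A PMD can pair the new function with rte_eth_dma_zone_reserve() to release rings on queue teardown instead of leaking the memzone. A sketch with a hypothetical teardown helper and an assumed ring name:

#include <errno.h>
#include <rte_ethdev_driver.h>

/* Hypothetical teardown helper: free the descriptor-ring memzone that the
 * setup path reserved as "rx_ring" for this queue. The name must match the
 * reserve-time name, since both calls derive the same memzone name through
 * eth_dma_mzone_name(). */
static void
my_rx_ring_free(struct rte_eth_dev *dev, uint16_t qid)
{
	int rc = rte_eth_dma_zone_free(dev, "rx_ring", qid);

	/* -ENOENT only means the zone was never reserved; harmless here */
	if (rc != 0 && rc != -ENOENT)
		RTE_ETHDEV_LOG(ERR, "failed to free rx_ring memzone\n");
}
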
int
rte_eth_dev_create(struct rte_device *device, const char *name,
size_t priv_data_size,