uint16_t group_id; /**< Group ID of KNI devices */
uint32_t slot_id; /**< KNI pool slot ID */
struct rte_mempool *pktmbuf_pool; /**< pkt mbuf mempool */
- unsigned mbuf_size; /**< mbuf size */
+ unsigned int mbuf_size; /**< mbuf size */
const struct rte_memzone *m_tx_q; /**< TX queue memzone */
const struct rte_memzone *m_rx_q; /**< RX queue memzone */
/* For request & response */
struct rte_kni_fifo *req_q; /**< Request queue */
struct rte_kni_fifo *resp_q; /**< Response queue */
- void * sync_addr; /**< Req/Resp Mem address */
+ void *sync_addr; /**< Req/Resp Mem address */
struct rte_kni_ops ops; /**< operations for request */
};
int
rte_kni_init(unsigned int max_kni_ifaces __rte_unused)
{
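+ /* The KNI kernel module exchanges mbuf pointers with user space as
+  * physical addresses, so KNI cannot run in IOVA-as-VA mode. */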
+ if (rte_eal_iova_mode() != RTE_IOVA_PA) {
+ RTE_LOG(ERR, KNI, "KNI requires IOVA as PA\n");
+ return -1;
+ }
+
/* Check FD and open */
if (kni_fd < 0) {
kni_fd = open("/dev/" KNI_DEVICE, O_RDWR);
if (kni_fd < 0) {
RTE_LOG(ERR, KNI, "Can not open /dev/%s\n", KNI_DEVICE);
return -1;
}
}
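/*
 * Usage sketch, not part of the patch: on a machine where the EAL would
 * pick IOVA-as-VA, the new check above makes rte_kni_init() fail, so the
 * application should force PA mode, e.g. via the EAL --iova-mode option
 * (assuming the DPDK version in use provides it). All names below are
 * illustrative.
 */
static int
example_init_kni(int argc, char **argv)
{
	/* e.g. invoked as: ./app --iova-mode=pa -- <app args> */
	if (rte_eal_init(argc, argv) < 0)
		return -1;

	return rte_kni_init(0); /* max_kni_ifaces is unused */
}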
- rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_lock();
kni = __rte_kni_get(conf->name);
if (kni != NULL) {
RTE_LOG(ERR, KNI, "KNI already exists\n");
goto unlock;
}
kni->ops.port_id = UINT16_MAX;
memset(&dev_info, 0, sizeof(dev_info));
- dev_info.bus = conf->addr.bus;
- dev_info.devid = conf->addr.devid;
- dev_info.function = conf->addr.function;
- dev_info.vendor_id = conf->id.vendor_id;
- dev_info.device_id = conf->id.device_id;
dev_info.core_id = conf->core_id;
dev_info.force_bind = conf->force_bind;
dev_info.group_id = conf->group_id;
dev_info.mbuf_size = conf->mbuf_size;
dev_info.mtu = conf->mtu;
- memcpy(dev_info.mac_addr, conf->mac_addr, ETHER_ADDR_LEN);
+ memcpy(dev_info.mac_addr, conf->mac_addr, RTE_ETHER_ADDR_LEN);
strlcpy(dev_info.name, conf->name, RTE_KNI_NAMESIZE);
- RTE_LOG(INFO, KNI, "pci: %02x:%02x:%02x \t %02x:%02x\n",
- dev_info.bus, dev_info.devid, dev_info.function,
- dev_info.vendor_id, dev_info.device_id);
-
ret = kni_reserve_mz(kni);
if (ret < 0)
goto mz_fail;
kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);
TAILQ_INSERT_TAIL(kni_list, te, next);
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_unlock();
/* Allocate mbufs and then put them into alloc_q */
kni_allocate_mbufs(kni);
mz_fail:
rte_free(kni);
kni_fail:
rte_free(te);
unlock:
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_unlock();
return NULL;
}
static void *
va2pa(struct rte_mbuf *m)
{
return (void *)((unsigned long)m -
((unsigned long)m->buf_addr -
(unsigned long)m->buf_iova));
}
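+ /* Rewrite each next pointer of a segmented mbuf to its physical
+  * address so the kernel side can walk the whole chain, and return
+  * the physical address of the head segment. */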
+static void *
+va2pa_all(struct rte_mbuf *mbuf)
+{
+ void *phy_mbuf = va2pa(mbuf);
+ struct rte_mbuf *next = mbuf->next;
+ while (next) {
+ mbuf->next = va2pa(next);
+ mbuf = next;
+ next = mbuf->next;
+ }
+ return phy_mbuf;
+}
+
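/*
 * Usage sketch, illustrative only: with va2pa_all() wired into
 * rte_kni_tx_burst() below, a multi-segment packet can be handed to KNI
 * as-is; the kni handle and mempool are assumed to exist already.
 */
static void
example_tx_segmented(struct rte_kni *kni, struct rte_mempool *pool)
{
	struct rte_mbuf *head = rte_pktmbuf_alloc(pool);
	struct rte_mbuf *tail = rte_pktmbuf_alloc(pool);

	if (head == NULL || tail == NULL) {
		rte_pktmbuf_free(head);
		rte_pktmbuf_free(tail);
		return;
	}

	/* Link the segments; head->next is still a virtual address here
	 * and is only converted inside rte_kni_tx_burst(). */
	if (rte_pktmbuf_chain(head, tail) != 0) {
		rte_pktmbuf_free(head);
		rte_pktmbuf_free(tail);
		return;
	}

	if (rte_kni_tx_burst(kni, &head, 1) != 1)
		rte_pktmbuf_free(head); /* not enqueued, chain untouched */
}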
static void
obj_free(struct rte_mempool *mp __rte_unused, void *opaque, void *obj,
unsigned obj_idx __rte_unused)
kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);
- rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_lock();
TAILQ_FOREACH(te, kni_list, next) {
if (te->data == kni) {
TAILQ_REMOVE(kni_list, te, next);
break;
}
}
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_unlock();
/* mbufs in all fifos should be released, except request/response */
return 0;
unlock:
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_unlock();
return -1;
}
static int
kni_config_promiscusity(uint16_t port_id, uint8_t to_on)
{
+ int ret;
+
if (!rte_eth_dev_is_valid_port(port_id)) {
RTE_LOG(ERR, KNI, "Invalid port id %d\n", port_id);
return -EINVAL;
}

RTE_LOG(INFO, KNI, "Configure promiscuous mode of %d to %d\n",
port_id, to_on);
if (to_on)
- rte_eth_promiscuous_enable(port_id);
+ ret = rte_eth_promiscuous_enable(port_id);
else
- rte_eth_promiscuous_disable(port_id);
+ ret = rte_eth_promiscuous_disable(port_id);
+
+ if (ret != 0)
+ RTE_LOG(ERR, KNI,
+ "Failed to %s promiscuous mode for port %u: %s\n",
+ to_on ? "enable" : "disable", port_id,
+ rte_strerror(-ret));
+
+ return ret;
+}
+
+/* default callback for request of configuring allmulticast mode */
+static int
+kni_config_allmulticast(uint16_t port_id, uint8_t to_on)
+{
+ if (!rte_eth_dev_is_valid_port(port_id)) {
+ RTE_LOG(ERR, KNI, "Invalid port id %d\n", port_id);
+ return -EINVAL;
+ }
+
+ RTE_LOG(INFO, KNI, "Configure allmulticast mode of %d to %d\n",
+ port_id, to_on);
+
+ if (to_on)
+ rte_eth_allmulticast_enable(port_id);
+ else
+ rte_eth_allmulticast_disable(port_id);
+ return 0;
+}
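/*
 * Sketch, hypothetical callback: an application can override the default
 * above by setting config_allmulticast in struct rte_kni_ops before
 * rte_kni_alloc() or rte_kni_register_handlers().
 */
static int
example_allmulti_cb(uint16_t port_id, uint8_t to_on)
{
	RTE_LOG(INFO, KNI, "App allmulticast callback: port %u -> %u\n",
		port_id, to_on);

	if (to_on)
		rte_eth_allmulticast_enable(port_id);
	else
		rte_eth_allmulticast_disable(port_id);

	return 0;
}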
int
rte_kni_handle_request(struct rte_kni *kni)
{
- unsigned ret;
+ unsigned int ret;
struct rte_kni_request *req = NULL;
if (kni == NULL)
return -1;
case RTE_KNI_REQ_CFG_NETWORK_IF: /* Set network interface up/down */
if (kni->ops.config_network_if)
- req->result = kni->ops.config_network_if(\
- kni->ops.port_id, req->if_up);
+ req->result = kni->ops.config_network_if(kni->ops.port_id,
+ req->if_up);
break;
case RTE_KNI_REQ_CHANGE_MAC_ADDR: /* Change MAC Address */
if (kni->ops.config_mac_address)
req->result = kni->ops.config_mac_address(
kni->ops.port_id, req->mac_addr);
break;
+ case RTE_KNI_REQ_CHANGE_ALLMULTI: /* Change ALLMULTICAST MODE */
+ if (kni->ops.config_allmulticast)
+ req->result = kni->ops.config_allmulticast(
+ kni->ops.port_id, req->allmulti);
+ else if (kni->ops.port_id != UINT16_MAX)
+ req->result = kni_config_allmulticast(
+ kni->ops.port_id, req->allmulti);
+ break;
default:
RTE_LOG(ERR, KNI, "Unknown request id %u\n", req->req_id);
req->result = -EINVAL;
}
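/*
 * Usage sketch, names illustrative: requests such as the new ALLMULTI one
 * arrive asynchronously when the user runs e.g. "ip link set vEth0
 * allmulticast on", so the application must poll rte_kni_handle_request()
 * from a control path.
 */
static void
example_control_loop(struct rte_kni *kni, const volatile int *quit)
{
	while (!*quit)
		rte_kni_handle_request(kni);
}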
unsigned
-rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num)
+rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned int num)
{
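+ /* Clamp num to the free slots of rx_q up front so the kni_fifo_put()
+  * below can accept every chain already converted by va2pa_all(). */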
+ num = RTE_MIN(kni_fifo_free_count(kni->rx_q), num);
void *phy_mbufs[num];
unsigned int ret;
unsigned int i;
for (i = 0; i < num; i++)
- phy_mbufs[i] = va2pa(mbufs[i]);
+ phy_mbufs[i] = va2pa_all(mbufs[i]);
ret = kni_fifo_put(kni->rx_q, phy_mbufs, num);
}
unsigned
-rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num)
+rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned int num)
{
- unsigned ret = kni_fifo_get(kni->tx_q, (void **)mbufs, num);
+ unsigned int ret = kni_fifo_get(kni->tx_q, (void **)mbufs, num);
/* If buffers removed, allocate mbufs and then put them into alloc_q */
if (ret)
kni_allocate_mbufs(kni);

return ret;
}
- allocq_free = (kni->alloc_q->read - kni->alloc_q->write - 1) \
+ allocq_free = (kni->alloc_q->read - kni->alloc_q->write - 1)
& (MAX_MBUF_BURST_NUM - 1);
for (i = 0; i < allocq_free; i++) {
pkts[i] = rte_pktmbuf_alloc(kni->pktmbuf_pool);
if (name == NULL || name[0] == '\0')
return NULL;
- rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_read_lock();
kni = __rte_kni_get(name);
- rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_read_unlock();
return kni;
}
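/*
 * Usage sketch: behaviour of the lookup is unchanged, only the lock
 * wrappers are new; the interface name is illustrative.
 */
static struct rte_kni *
example_lookup(void)
{
	return rte_kni_get("vEth0");
}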
kni_check_request_register(struct rte_kni_ops *ops)
{
/* Check if KNI request ops have been registered */
- if( NULL == ops )
+ if (ops == NULL)
return KNI_REQ_NO_REGISTER;
- if ((ops->change_mtu == NULL)
- && (ops->config_network_if == NULL)
- && (ops->config_mac_address == NULL)
- && (ops->config_promiscusity == NULL))
+ if (ops->change_mtu == NULL
+ && ops->config_network_if == NULL
+ && ops->config_mac_address == NULL
+ && ops->config_promiscusity == NULL
+ && ops->config_allmulticast == NULL)
return KNI_REQ_NO_REGISTER;
return KNI_REQ_REGISTERED;
}
int
-rte_kni_register_handlers(struct rte_kni *kni,struct rte_kni_ops *ops)
+rte_kni_register_handlers(struct rte_kni *kni, struct rte_kni_ops *ops)
{
enum kni_ops_status req_status;
- if (NULL == ops) {
+ if (ops == NULL) {
RTE_LOG(ERR, KNI, "Invalid KNI request operation.\n");
return -1;
}
- if (NULL == kni) {
+ if (kni == NULL) {
RTE_LOG(ERR, KNI, "Invalid kni info.\n");
return -1;
}
req_status = kni_check_request_register(&kni->ops);
- if ( KNI_REQ_REGISTERED == req_status) {
+ if (req_status == KNI_REQ_REGISTERED) {
RTE_LOG(ERR, KNI, "The KNI request operation has already registered.\n");
return -1;
}
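/*
 * Sketch: registering the hypothetical callback sketched earlier; the
 * call fails if a handler set is already registered for this device.
 */
static int
example_register(struct rte_kni *kni, uint16_t port_id)
{
	/* The library copies the ops struct, so a stack local is fine. */
	struct rte_kni_ops ops = {
		.port_id = port_id,
		.config_allmulticast = example_allmulti_cb,
	};

	return rte_kni_register_handlers(kni, &ops);
}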
int
rte_kni_unregister_handlers(struct rte_kni *kni)
{
- if (NULL == kni) {
+ if (kni == NULL) {
RTE_LOG(ERR, KNI, "Invalid kni info.\n");
return -1;
}
return 0;
}
-int __rte_experimental
+int
rte_kni_update_link(struct rte_kni *kni, unsigned int linkup)
{
char path[64];